Diffstat (limited to 'vendor/github.com/olivere')
-rw-r--r--vendor/github.com/olivere/elastic/.gitignore33
-rw-r--r--vendor/github.com/olivere/elastic/.travis.yml15
-rw-r--r--vendor/github.com/olivere/elastic/CHANGELOG-3.0.md363
-rw-r--r--vendor/github.com/olivere/elastic/CHANGELOG-5.0.md195
-rw-r--r--vendor/github.com/olivere/elastic/CHANGELOG-6.0.md18
-rw-r--r--vendor/github.com/olivere/elastic/CODE_OF_CONDUCT.md46
-rw-r--r--vendor/github.com/olivere/elastic/CONTRIBUTING.md40
-rw-r--r--vendor/github.com/olivere/elastic/CONTRIBUTORS128
-rw-r--r--vendor/github.com/olivere/elastic/ISSUE_TEMPLATE.md18
-rw-r--r--vendor/github.com/olivere/elastic/LICENSE20
-rw-r--r--vendor/github.com/olivere/elastic/README.md393
-rw-r--r--vendor/github.com/olivere/elastic/acknowledged_response.go13
-rw-r--r--vendor/github.com/olivere/elastic/backoff.go148
-rw-r--r--vendor/github.com/olivere/elastic/backoff_test.go140
-rw-r--r--vendor/github.com/olivere/elastic/bulk.go417
-rw-r--r--vendor/github.com/olivere/elastic/bulk_delete_request.go166
-rw-r--r--vendor/github.com/olivere/elastic/bulk_delete_request_easyjson.go230
-rw-r--r--vendor/github.com/olivere/elastic/bulk_delete_request_test.go79
-rw-r--r--vendor/github.com/olivere/elastic/bulk_index_request.go239
-rw-r--r--vendor/github.com/olivere/elastic/bulk_index_request_easyjson.go262
-rw-r--r--vendor/github.com/olivere/elastic/bulk_index_request_test.go116
-rw-r--r--vendor/github.com/olivere/elastic/bulk_processor.go600
-rw-r--r--vendor/github.com/olivere/elastic/bulk_processor_test.go425
-rw-r--r--vendor/github.com/olivere/elastic/bulk_request.go17
-rw-r--r--vendor/github.com/olivere/elastic/bulk_test.go600
-rw-r--r--vendor/github.com/olivere/elastic/bulk_update_request.go298
-rw-r--r--vendor/github.com/olivere/elastic/bulk_update_request_easyjson.go461
-rw-r--r--vendor/github.com/olivere/elastic/bulk_update_request_test.go149
-rw-r--r--vendor/github.com/olivere/elastic/canonicalize.go34
-rw-r--r--vendor/github.com/olivere/elastic/canonicalize_test.go72
-rw-r--r--vendor/github.com/olivere/elastic/clear_scroll.go108
-rw-r--r--vendor/github.com/olivere/elastic/clear_scroll_test.go87
-rw-r--r--vendor/github.com/olivere/elastic/client.go1780
-rw-r--r--vendor/github.com/olivere/elastic/client_test.go1319
-rw-r--r--vendor/github.com/olivere/elastic/cluster-test/Makefile16
-rw-r--r--vendor/github.com/olivere/elastic/cluster-test/README.md63
-rw-r--r--vendor/github.com/olivere/elastic/cluster-test/cluster-test.go361
-rw-r--r--vendor/github.com/olivere/elastic/cluster_health.go248
-rw-r--r--vendor/github.com/olivere/elastic/cluster_health_test.go119
-rw-r--r--vendor/github.com/olivere/elastic/cluster_state.go288
-rw-r--r--vendor/github.com/olivere/elastic/cluster_state_test.go93
-rw-r--r--vendor/github.com/olivere/elastic/cluster_stats.go352
-rw-r--r--vendor/github.com/olivere/elastic/cluster_stats_test.go92
-rw-r--r--vendor/github.com/olivere/elastic/config/config.go90
-rw-r--r--vendor/github.com/olivere/elastic/config/config_test.go45
-rw-r--r--vendor/github.com/olivere/elastic/config/doc.go9
-rw-r--r--vendor/github.com/olivere/elastic/connection.go90
-rw-r--r--vendor/github.com/olivere/elastic/count.go315
-rw-r--r--vendor/github.com/olivere/elastic/count_test.go127
-rw-r--r--vendor/github.com/olivere/elastic/decoder.go26
-rw-r--r--vendor/github.com/olivere/elastic/decoder_test.go50
-rw-r--r--vendor/github.com/olivere/elastic/delete.go226
-rw-r--r--vendor/github.com/olivere/elastic/delete_by_query.go654
-rw-r--r--vendor/github.com/olivere/elastic/delete_by_query_test.go146
-rw-r--r--vendor/github.com/olivere/elastic/delete_test.go134
-rw-r--r--vendor/github.com/olivere/elastic/doc.go51
-rw-r--r--vendor/github.com/olivere/elastic/errors.go155
-rw-r--r--vendor/github.com/olivere/elastic/errors_test.go295
-rw-r--r--vendor/github.com/olivere/elastic/etc/elasticsearch.yml15
-rw-r--r--vendor/github.com/olivere/elastic/etc/ingest-geoip/.gitkeep0
-rw-r--r--vendor/github.com/olivere/elastic/etc/jvm.options100
-rw-r--r--vendor/github.com/olivere/elastic/etc/log4j2.properties74
-rw-r--r--vendor/github.com/olivere/elastic/etc/scripts/.gitkeep0
-rw-r--r--vendor/github.com/olivere/elastic/example_test.go530
-rw-r--r--vendor/github.com/olivere/elastic/exists.go181
-rw-r--r--vendor/github.com/olivere/elastic/exists_test.go53
-rw-r--r--vendor/github.com/olivere/elastic/explain.go326
-rw-r--r--vendor/github.com/olivere/elastic/explain_test.go44
-rw-r--r--vendor/github.com/olivere/elastic/fetch_source_context.go90
-rw-r--r--vendor/github.com/olivere/elastic/fetch_source_context_test.go125
-rw-r--r--vendor/github.com/olivere/elastic/field_caps.go202
-rw-r--r--vendor/github.com/olivere/elastic/field_caps_test.go146
-rw-r--r--vendor/github.com/olivere/elastic/geo_point.go48
-rw-r--r--vendor/github.com/olivere/elastic/geo_point_test.go24
-rw-r--r--vendor/github.com/olivere/elastic/get.go260
-rw-r--r--vendor/github.com/olivere/elastic/get_test.go166
-rw-r--r--vendor/github.com/olivere/elastic/highlight.go469
-rw-r--r--vendor/github.com/olivere/elastic/highlight_test.go211
-rw-r--r--vendor/github.com/olivere/elastic/index.go297
-rw-r--r--vendor/github.com/olivere/elastic/index_test.go280
-rw-r--r--vendor/github.com/olivere/elastic/indices_analyze.go284
-rw-r--r--vendor/github.com/olivere/elastic/indices_analyze_test.go85
-rw-r--r--vendor/github.com/olivere/elastic/indices_close.go159
-rw-r--r--vendor/github.com/olivere/elastic/indices_close_test.go84
-rw-r--r--vendor/github.com/olivere/elastic/indices_create.go136
-rw-r--r--vendor/github.com/olivere/elastic/indices_create_test.go63
-rw-r--r--vendor/github.com/olivere/elastic/indices_delete.go133
-rw-r--r--vendor/github.com/olivere/elastic/indices_delete_template.go128
-rw-r--r--vendor/github.com/olivere/elastic/indices_delete_test.go23
-rw-r--r--vendor/github.com/olivere/elastic/indices_exists.go155
-rw-r--r--vendor/github.com/olivere/elastic/indices_exists_template.go118
-rw-r--r--vendor/github.com/olivere/elastic/indices_exists_template_test.go68
-rw-r--r--vendor/github.com/olivere/elastic/indices_exists_test.go23
-rw-r--r--vendor/github.com/olivere/elastic/indices_exists_type.go165
-rw-r--r--vendor/github.com/olivere/elastic/indices_exists_type_test.go135
-rw-r--r--vendor/github.com/olivere/elastic/indices_flush.go173
-rw-r--r--vendor/github.com/olivere/elastic/indices_flush_test.go70
-rw-r--r--vendor/github.com/olivere/elastic/indices_forcemerge.go193
-rw-r--r--vendor/github.com/olivere/elastic/indices_forcemerge_test.go57
-rw-r--r--vendor/github.com/olivere/elastic/indices_get.go206
-rw-r--r--vendor/github.com/olivere/elastic/indices_get_aliases.go161
-rw-r--r--vendor/github.com/olivere/elastic/indices_get_aliases_test.go181
-rw-r--r--vendor/github.com/olivere/elastic/indices_get_field_mapping.go187
-rw-r--r--vendor/github.com/olivere/elastic/indices_get_field_mapping_test.go55
-rw-r--r--vendor/github.com/olivere/elastic/indices_get_mapping.go174
-rw-r--r--vendor/github.com/olivere/elastic/indices_get_mapping_test.go50
-rw-r--r--vendor/github.com/olivere/elastic/indices_get_settings.go187
-rw-r--r--vendor/github.com/olivere/elastic/indices_get_settings_test.go82
-rw-r--r--vendor/github.com/olivere/elastic/indices_get_template.go133
-rw-r--r--vendor/github.com/olivere/elastic/indices_get_template_test.go41
-rw-r--r--vendor/github.com/olivere/elastic/indices_get_test.go98
-rw-r--r--vendor/github.com/olivere/elastic/indices_open.go163
-rw-r--r--vendor/github.com/olivere/elastic/indices_open_test.go23
-rw-r--r--vendor/github.com/olivere/elastic/indices_put_alias.go302
-rw-r--r--vendor/github.com/olivere/elastic/indices_put_alias_test.go222
-rw-r--r--vendor/github.com/olivere/elastic/indices_put_mapping.go228
-rw-r--r--vendor/github.com/olivere/elastic/indices_put_mapping_test.go95
-rw-r--r--vendor/github.com/olivere/elastic/indices_put_settings.go191
-rw-r--r--vendor/github.com/olivere/elastic/indices_put_settings_test.go95
-rw-r--r--vendor/github.com/olivere/elastic/indices_put_template.go207
-rw-r--r--vendor/github.com/olivere/elastic/indices_refresh.go98
-rw-r--r--vendor/github.com/olivere/elastic/indices_refresh_test.go81
-rw-r--r--vendor/github.com/olivere/elastic/indices_rollover.go272
-rw-r--r--vendor/github.com/olivere/elastic/indices_rollover_test.go116
-rw-r--r--vendor/github.com/olivere/elastic/indices_segments.go237
-rw-r--r--vendor/github.com/olivere/elastic/indices_segments_test.go86
-rw-r--r--vendor/github.com/olivere/elastic/indices_shrink.go179
-rw-r--r--vendor/github.com/olivere/elastic/indices_shrink_test.go34
-rw-r--r--vendor/github.com/olivere/elastic/indices_stats.go384
-rw-r--r--vendor/github.com/olivere/elastic/indices_stats_test.go86
-rw-r--r--vendor/github.com/olivere/elastic/ingest_delete_pipeline.go129
-rw-r--r--vendor/github.com/olivere/elastic/ingest_delete_pipeline_test.go31
-rw-r--r--vendor/github.com/olivere/elastic/ingest_get_pipeline.go121
-rw-r--r--vendor/github.com/olivere/elastic/ingest_get_pipeline_test.go121
-rw-r--r--vendor/github.com/olivere/elastic/ingest_put_pipeline.go158
-rw-r--r--vendor/github.com/olivere/elastic/ingest_put_pipeline_test.go31
-rw-r--r--vendor/github.com/olivere/elastic/ingest_simulate_pipeline.go161
-rw-r--r--vendor/github.com/olivere/elastic/ingest_simulate_pipeline_test.go35
-rw-r--r--vendor/github.com/olivere/elastic/inner_hit.go160
-rw-r--r--vendor/github.com/olivere/elastic/inner_hit_test.go44
-rw-r--r--vendor/github.com/olivere/elastic/logger.go10
-rw-r--r--vendor/github.com/olivere/elastic/mget.go257
-rw-r--r--vendor/github.com/olivere/elastic/mget_test.go96
-rw-r--r--vendor/github.com/olivere/elastic/msearch.go116
-rw-r--r--vendor/github.com/olivere/elastic/msearch_test.go303
-rw-r--r--vendor/github.com/olivere/elastic/mtermvectors.go475
-rw-r--r--vendor/github.com/olivere/elastic/mtermvectors_test.go134
-rw-r--r--vendor/github.com/olivere/elastic/nodes_info.go313
-rw-r--r--vendor/github.com/olivere/elastic/nodes_info_test.go43
-rw-r--r--vendor/github.com/olivere/elastic/nodes_stats.go703
-rw-r--r--vendor/github.com/olivere/elastic/nodes_stats_test.go138
-rw-r--r--vendor/github.com/olivere/elastic/percolate_test.go68
-rw-r--r--vendor/github.com/olivere/elastic/ping.go127
-rw-r--r--vendor/github.com/olivere/elastic/ping_test.go65
-rw-r--r--vendor/github.com/olivere/elastic/plugins.go40
-rw-r--r--vendor/github.com/olivere/elastic/plugins_test.go32
-rw-r--r--vendor/github.com/olivere/elastic/query.go13
-rw-r--r--vendor/github.com/olivere/elastic/recipes/bulk_insert/bulk_insert.go173
-rw-r--r--vendor/github.com/olivere/elastic/recipes/bulk_processor/main.go149
-rw-r--r--vendor/github.com/olivere/elastic/recipes/connect/connect.go43
-rw-r--r--vendor/github.com/olivere/elastic/recipes/sliced_scroll/sliced_scroll.go161
-rw-r--r--vendor/github.com/olivere/elastic/reindex.go695
-rw-r--r--vendor/github.com/olivere/elastic/reindex_test.go401
-rw-r--r--vendor/github.com/olivere/elastic/request.go79
-rw-r--r--vendor/github.com/olivere/elastic/request_test.go72
-rw-r--r--vendor/github.com/olivere/elastic/rescore.go44
-rw-r--r--vendor/github.com/olivere/elastic/rescorer.go64
-rw-r--r--vendor/github.com/olivere/elastic/response.go41
-rw-r--r--vendor/github.com/olivere/elastic/response_test.go48
-rw-r--r--vendor/github.com/olivere/elastic/retrier.go61
-rw-r--r--vendor/github.com/olivere/elastic/retrier_test.go174
-rw-r--r--vendor/github.com/olivere/elastic/retry.go56
-rw-r--r--vendor/github.com/olivere/elastic/retry_test.go44
-rwxr-xr-xvendor/github.com/olivere/elastic/run-es.sh3
-rw-r--r--vendor/github.com/olivere/elastic/script.go127
-rw-r--r--vendor/github.com/olivere/elastic/script_test.go61
-rw-r--r--vendor/github.com/olivere/elastic/scroll.go470
-rw-r--r--vendor/github.com/olivere/elastic/scroll_test.go387
-rw-r--r--vendor/github.com/olivere/elastic/search.go580
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs.go1520
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_children.go76
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_children_test.go46
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_composite.go498
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_composite_test.go92
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_count_thresholds.go13
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go285
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go49
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_date_range.go255
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go155
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_filter.go77
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_filter_test.go66
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_filters.go138
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_filters_test.go99
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go198
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go93
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid.go102
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid_test.go84
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_global.go71
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_global_test.go44
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_histogram.go265
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go61
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range.go195
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range_test.go90
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_missing.go81
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_missing_test.go44
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_nested.go82
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_nested_test.go62
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_range.go244
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_range_test.go178
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested.go86
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested_test.go83
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_sampler.go111
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go30
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go389
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go211
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text.go245
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text_test.go66
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_terms.go368
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_bucket_terms_test.go155
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_matrix_stats.go120
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_matrix_stats_test.go53
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_avg.go101
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_avg_test.go61
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality.go120
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go78
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go99
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go44
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go105
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go61
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_max.go99
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_max_test.go61
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_min.go100
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_min_test.go61
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go131
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go78
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles.go130
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go78
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_stats.go99
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_stats_test.go61
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_sum.go99
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_sum_test.go61
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits.go143
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go31
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_value_count.go102
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go63
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go113
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go132
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go30
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go134
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go29
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go90
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative.go124
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go114
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go114
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go393
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go132
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket.go125
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket_test.go44
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go124
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket.go113
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go113
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_pipeline_test.go903
-rw-r--r--vendor/github.com/olivere/elastic/search_aggs_test.go3416
-rw-r--r--vendor/github.com/olivere/elastic/search_collapse_builder.go68
-rw-r--r--vendor/github.com/olivere/elastic/search_collapse_builder_test.go29
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_bool.go203
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_bool_test.go33
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_boosting.go97
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_boosting_test.go30
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_common_terms.go137
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_common_terms_test.go85
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_constant_score.go59
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_constant_score_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_dis_max.go104
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_dis_max_test.go28
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_exists.go49
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_exists_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_fsq.go171
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_fsq_score_funcs.go567
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_fsq_test.go166
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_fuzzy.go120
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_fuzzy_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_geo_bounding_box.go121
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go63
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_geo_distance.go107
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_geo_distance_test.go69
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_geo_polygon.go72
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_geo_polygon_test.go58
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_has_child.go131
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_has_child_test.go45
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_has_parent.go97
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_has_parent_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_ids.go76
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_ids_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_match.go189
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_match_all.go51
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_match_all_test.go61
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_match_none.go39
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_match_none_test.go44
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_match_phrase.go79
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix.go89
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_match_phrase_test.go29
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_match_test.go44
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_more_like_this.go412
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_more_like_this_test.go92
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_multi_match.go275
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_multi_match_test.go131
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_nested.go96
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_nested_test.go86
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_parent_id.go99
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_parent_id_test.go52
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_percolator.go115
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_percolator_test.go65
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_prefix.go67
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_prefix_example_test.go35
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_prefix_test.go45
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_query_string.go350
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_query_string_test.go46
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_range.go144
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_range_test.go68
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_raw_string.go26
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_raw_string_test.go44
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_regexp.go82
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_regexp_test.go47
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_script.go51
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_script_test.go45
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_simple_query_string.go185
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_simple_query_string_test.go87
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_slice.go53
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_slice_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_term.go58
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_term_test.go46
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_terms.go75
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_terms_set.go96
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_terms_set_test.go75
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_terms_test.go82
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_type.go26
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_type_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_wildcard.go81
-rw-r--r--vendor/github.com/olivere/elastic/search_queries_wildcard_test.go68
-rw-r--r--vendor/github.com/olivere/elastic/search_request.go211
-rw-r--r--vendor/github.com/olivere/elastic/search_request_test.go61
-rw-r--r--vendor/github.com/olivere/elastic/search_source.go546
-rw-r--r--vendor/github.com/olivere/elastic/search_source_test.go295
-rw-r--r--vendor/github.com/olivere/elastic/search_suggester_test.go355
-rw-r--r--vendor/github.com/olivere/elastic/search_terms_lookup.go74
-rw-r--r--vendor/github.com/olivere/elastic/search_terms_lookup_test.go27
-rw-r--r--vendor/github.com/olivere/elastic/search_test.go1320
-rw-r--r--vendor/github.com/olivere/elastic/setup_test.go445
-rw-r--r--vendor/github.com/olivere/elastic/snapshot_create.go191
-rw-r--r--vendor/github.com/olivere/elastic/snapshot_create_repository.go205
-rw-r--r--vendor/github.com/olivere/elastic/snapshot_create_repository_test.go61
-rw-r--r--vendor/github.com/olivere/elastic/snapshot_create_test.go63
-rw-r--r--vendor/github.com/olivere/elastic/snapshot_delete_repository.go132
-rw-r--r--vendor/github.com/olivere/elastic/snapshot_delete_repository_test.go35
-rw-r--r--vendor/github.com/olivere/elastic/snapshot_get_repository.go134
-rw-r--r--vendor/github.com/olivere/elastic/snapshot_get_repository_test.go39
-rw-r--r--vendor/github.com/olivere/elastic/snapshot_verify_repository.go132
-rw-r--r--vendor/github.com/olivere/elastic/snapshot_verify_repository_test.go31
-rw-r--r--vendor/github.com/olivere/elastic/sort.go614
-rw-r--r--vendor/github.com/olivere/elastic/sort_test.go278
-rw-r--r--vendor/github.com/olivere/elastic/suggest_field.go90
-rw-r--r--vendor/github.com/olivere/elastic/suggest_field_test.go29
-rw-r--r--vendor/github.com/olivere/elastic/suggester.go15
-rw-r--r--vendor/github.com/olivere/elastic/suggester_completion.go352
-rw-r--r--vendor/github.com/olivere/elastic/suggester_completion_test.go110
-rw-r--r--vendor/github.com/olivere/elastic/suggester_context.go124
-rw-r--r--vendor/github.com/olivere/elastic/suggester_context_category.go119
-rw-r--r--vendor/github.com/olivere/elastic/suggester_context_category_test.go163
-rw-r--r--vendor/github.com/olivere/elastic/suggester_context_geo.go130
-rw-r--r--vendor/github.com/olivere/elastic/suggester_context_geo_test.go48
-rw-r--r--vendor/github.com/olivere/elastic/suggester_context_test.go55
-rw-r--r--vendor/github.com/olivere/elastic/suggester_phrase.go546
-rw-r--r--vendor/github.com/olivere/elastic/suggester_phrase_test.go169
-rw-r--r--vendor/github.com/olivere/elastic/suggester_term.go233
-rw-r--r--vendor/github.com/olivere/elastic/suggester_term_test.go49
-rw-r--r--vendor/github.com/olivere/elastic/tasks_cancel.go149
-rw-r--r--vendor/github.com/olivere/elastic/tasks_cancel_test.go51
-rw-r--r--vendor/github.com/olivere/elastic/tasks_get_task.go108
-rw-r--r--vendor/github.com/olivere/elastic/tasks_get_task_test.go43
-rw-r--r--vendor/github.com/olivere/elastic/tasks_list.go231
-rw-r--r--vendor/github.com/olivere/elastic/tasks_list_test.go65
-rw-r--r--vendor/github.com/olivere/elastic/termvectors.go464
-rw-r--r--vendor/github.com/olivere/elastic/termvectors_test.go157
-rw-r--r--vendor/github.com/olivere/elastic/update.go327
-rw-r--r--vendor/github.com/olivere/elastic/update_by_query.go655
-rw-r--r--vendor/github.com/olivere/elastic/update_by_query_test.go147
-rw-r--r--vendor/github.com/olivere/elastic/update_integration_test.go58
-rw-r--r--vendor/github.com/olivere/elastic/update_test.go262
-rw-r--r--vendor/github.com/olivere/elastic/uritemplates/LICENSE18
-rw-r--r--vendor/github.com/olivere/elastic/uritemplates/uritemplates.go359
-rw-r--r--vendor/github.com/olivere/elastic/uritemplates/utils.go13
-rw-r--r--vendor/github.com/olivere/elastic/uritemplates/utils_test.go105
402 files changed, 0 insertions, 64907 deletions
diff --git a/vendor/github.com/olivere/elastic/.gitignore b/vendor/github.com/olivere/elastic/.gitignore
deleted file mode 100644
index 306ffbd83..000000000
--- a/vendor/github.com/olivere/elastic/.gitignore
+++ /dev/null
@@ -1,33 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-/.vscode/
-/debug.test
-/generator
-/cluster-test/cluster-test
-/cluster-test/*.log
-/cluster-test/es-chaos-monkey
-/spec
-/tmp
-/CHANGELOG-3.0.html
-
diff --git a/vendor/github.com/olivere/elastic/.travis.yml b/vendor/github.com/olivere/elastic/.travis.yml
deleted file mode 100644
index 9658f873a..000000000
--- a/vendor/github.com/olivere/elastic/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-sudo: required
-language: go
-script: go test -race -v . ./config
-go:
- - 1.8.x
- - 1.9.x
- # - tip
-matrix:
- allow_failures:
- - go: tip
-services:
- - docker
-before_install:
- - sudo sysctl -w vm.max_map_count=262144
- - docker run -d --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch-oss:6.2.1 elasticsearch -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_
diff --git a/vendor/github.com/olivere/elastic/CHANGELOG-3.0.md b/vendor/github.com/olivere/elastic/CHANGELOG-3.0.md
deleted file mode 100644
index 07f3e66bf..000000000
--- a/vendor/github.com/olivere/elastic/CHANGELOG-3.0.md
+++ /dev/null
@@ -1,363 +0,0 @@
-# Elastic 3.0
-
-Elasticsearch 2.0 comes with some [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html). You will probably need to upgrade your application and/or rewrite part of it due to those changes.
-
-We use that window of opportunity to also update Elastic (the Go client) from version 2.0 to 3.0. This introduces both changes due to the Elasticsearch 2.0 update and changes that make Elastic cleaner by removing some old cruft.
-
-So, to summarize:
-
-1. Elastic 2.0 is compatible with Elasticsearch 1.7+ and is still actively maintained.
-2. Elastic 3.0 is compatible with Elasticsearch 2.0+ and will soon become the new master branch.
-
-The rest of the document is a list of all changes in Elastic 3.0.
-
-## Pointer types
-
-All types have changed to be pointer types, not value types. This is not only cleaner but also simplifies the API, as illustrated by the following example:
-
-Example for Elastic 2.0 (old):
-
-```go
-q := elastic.NewMatchAllQuery()
-res, err := elastic.Search("one").Query(&q).Do() // notice the & here
-```
-
-Example for Elastic 3.0 (new):
-
-```go
-q := elastic.NewMatchAllQuery()
-res, err := elastic.Search("one").Query(q).Do() // no more &
-// ... which can be simplified as:
-res, err := elastic.Search("one").Query(elastic.NewMatchAllQuery()).Do()
-```
-
-It also helps to prevent [subtle issues](https://github.com/olivere/elastic/issues/115#issuecomment-130753046).
-
-## Query/filter merge
-
-One of the biggest changes in Elasticsearch 2.0 is the [merge of queries and filters](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_queries_and_filters_merged). In Elasticsearch 1.x, you had a whole range of queries and filters that were basically identical (e.g. `term_query` and `term_filter`).
-
-The practical aspect of the merge is that you can now use queries where you previously had to use filters. For Elastic 3.0 this means: We could remove a whole bunch of files. Yay!
-
-Notice that some method names still contain "filter", e.g. `PostFilter`. However, they now accept a `Query` where they used to accept a `Filter`.
-
-Example for Elastic 2.0 (old):
-
-```go
-q := elastic.NewMatchAllQuery()
-f := elastic.NewTermFilter("tag", "important")
-res, err := elastic.Search().Index("one").Query(&q).PostFilter(f)
-```
-
-Example for Elastic 3.0 (new):
-
-```go
-q := elastic.NewMatchAllQuery()
-f := elastic.NewTermQuery("tag", "important") // it's a query now!
-res, err := elastic.Search().Index("one").Query(q).PostFilter(f)
-```
-
-## Facets are removed
-
-[Facets have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_facets_have_been_removed) in Elasticsearch 2.0. You need to use aggregations now.
-
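-For illustration, a minimal sketch of a terms aggregation replacing a former terms facet (assuming an existing `client`; the index and field names are just examples):
-
-```go
-agg := elastic.NewTermsAggregation().Field("user").Size(10)
-res, err := client.Search().Index("twitter").Aggregation("users", agg).Do()
-if err != nil {
- // Handle error
-}
-if users, found := res.Aggregations.Terms("users"); found {
- for _, bucket := range users.Buckets {
-  _ = bucket.Key      // e.g. the user name
-  _ = bucket.DocCount // number of documents in this bucket
- }
-}
-```
-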
-## Errors
-
-Elasticsearch 2.0 returns more information about an error in the HTTP response body. Elastic 3.0 now reads this information and makes it accessible by the consumer.
-
-Errors and all their details are now returned in [`Error`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59).
-
-### HTTP Status 404 (Not Found)
-
-When Elasticsearch does not find an entity or an index, it generally returns HTTP status code 404. In Elastic 2.0 this was a valid result and didn't raise an error from the `Do` functions. This has now changed in Elastic 3.0.
-
-Starting with Elastic 3.0, there are only two types of responses considered successful. First, responses with HTTP status codes [200..299]. Second, HEAD requests which return HTTP status 404. The latter is used by Elasticsearch to e.g. check for existence of indices or documents. All other responses will return an error.
-
-To check for HTTP Status 404 (with non-HEAD requests), e.g. when trying to get or delete a missing document, you can use the [`IsNotFound`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L84) helper (see below).
-
-The following example illustrates how to check for a missing document in Elastic 2.0 and what has changed in 3.0.
-
-Example for Elastic 2.0 (old):
-
-```go
-res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
-if err != nil {
- // Something else went wrong (but 404 is NOT an error in Elastic 2.0)
-}
-if !res.Found {
- // Document has not been found
-}
-```
-
-Example for Elastic 3.0 (new):
-
-```go
-res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
-if err != nil {
- if elastic.IsNotFound(err) {
- // Document has not been found
- } else {
- // Something else went wrong
- }
-}
-```
-
-### HTTP Status 408 (Timeouts)
-
-Elasticsearch now responds with HTTP status code 408 (Timeout) when a request fails due to a timeout. E.g. if you specify a timeout with the Cluster Health API, the HTTP response status will be 408 if the timeout is raised. See [here](https://github.com/elastic/elasticsearch/commit/fe3179d9cccb569784434b2135ca9ae13d5158d3) for the specific commit to the Cluster Health API.
-
-To check for HTTP Status 408, we introduced the [`IsTimeout`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L101) helper.
-
-Example for Elastic 2.0 (old):
-
-```go
-health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
-if err != nil {
- // ...
-}
-if health.TimedOut {
- // We have a timeout
-}
-```
-
-Example for Elastic 3.0 (new):
-
-```go
-health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
-if elastic.IsTimeout(err) {
- // We have a timeout
-}
-```
-
-### Bulk Errors
-
-The error response of a bulk operation used to be a simple string in Elasticsearch 1.x.
-In Elasticsearch 2.0, it returns a structured JSON object with a lot more details about the error.
-These errors are now captured in an object of type [`ErrorDetails`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59) which is used in [`BulkResponseItem`](https://github.com/olivere/elastic/blob/release-branch.v3/bulk.go#L206).
-
-### Removed specific Elastic errors
-
-The specific error types `ErrMissingIndex`, `ErrMissingType`, and `ErrMissingId` have been removed. They were only used by `DeleteService` and are replaced by a generic error message.
-
-## Numeric types
-
-Elastic 3.0 has settled to use `float64` everywhere. It used to be a mix of `float32` and `float64` in Elastic 2.0. E.g. all boostable queries in Elastic 3.0 now have a boost type of `float64` where it used to be `float32`.
-
-## Pluralization
-
-Some services accept zero, one, or more indices or types to operate on.
-E.g. the `SearchService` accepts a list of zero, one, or more indices to
-search and therefore had a func called `Index(index string)` and a func
-called `Indices(indices ...string)`.
-
-Elastic 3.0 now only uses the singular form that, when applicable, accepts a
-variadic type. E.g. in the case of the `SearchService`, you now only have
-one func with the following signature: `Index(indices ...string)`.
-
-Notice this is only limited to `Index(...)` and `Type(...)`. There are other
-services with variadic functions. These have not been changed.
-
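-A short sketch of the merged variadic form (assuming an existing `client`; index names are examples):
-
-```go
-// Formerly Index("one") plus Indices("two", "three"); now a single variadic func.
-res, err := client.Search().Index("one", "two", "three").Query(elastic.NewMatchAllQuery()).Do()
-```
-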
-## Multiple calls to variadic functions
-
-Some services with variadic functions cleared the underlying slice when
-called multiple times, while other services just added to the existing slice.
-This has now been normalized to always add to the underlying slice.
-
-Example for Elastic 2.0 (old):
-
-```go
-// Would only clear scroll id "two"
-// because ScrollId cleared the values when called multiple times
-client.ClearScroll().ScrollId("one").ScrollId("two").Do()
-```
-
-Example for Elastic 3.0 (new):
-
-```go
-// Now (correctly) clears both scroll id "one" and "two"
-// because ScrollId no longer clears the values when called multiple times
-client.ClearScroll().ScrollId("one").ScrollId("two").Do()
-```
-
-## Ping service requires URL
-
-The `Ping` service raised some issues because it is different from all
-other services. If not explicitly given a URL, it always pings `127.0.0.1:9200`.
-
-Users expected to ping the cluster, but that is not possible as the cluster
-can be a set of many nodes. So which node do we ping then?
-
-To make it more clear, the `Ping` function on the client now requires users
-to explicitly set the URL of the node to ping.
-
-## Meta fields
-
-Many of the meta fields e.g. `_parent` or `_routing` are now
-[part of the top-level of a document](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_mapping_changes.html#migration-meta-fields)
-and are no longer returned as parts of the `fields` object. We had to change
-larger parts of e.g. the `Reindexer` to get it to work seamlessly with Elasticsearch 2.0.
-
-Notice that all stored meta-fields are now [returned by default](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_crud_and_routing_changes.html#_all_stored_meta_fields_returned_by_default).
-
-## HasParentQuery / HasChildQuery
-
-`NewHasParentQuery` and `NewHasChildQuery` must now include both parent/child type and query. It is now in line with the Java API.
-
-Example for Elastic 2.0 (old):
-
-```go
-allQ := elastic.NewMatchAllQuery()
-q := elastic.NewHasChildFilter("tweet").Query(&allQ)
-```
-
-Example for Elastic 3.0 (new):
-
-```go
-q := elastic.NewHasChildQuery("tweet", elastic.NewMatchAllQuery())
-```
-
-## SetBasicAuth client option
-
-You can now tell Elastic to pass HTTP Basic Auth credentials with each request. In previous versions of Elastic you had to set up your own `http.Transport` to do this. This should make it more convenient to use Elastic in combination with [Shield](https://www.elastic.co/products/shield) in its [basic setup](https://www.elastic.co/guide/en/shield/current/enable-basic-auth.html).
-
-Example:
-
-```go
-client, err := elastic.NewClient(elastic.SetBasicAuth("user", "secret"))
-if err != nil {
- log.Fatal(err)
-}
-```
-
-## Delete-by-Query API
-
-The Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_delete_by_query_is_now_a_plugin). It is no longer core part of Elasticsearch. You can [install it as a plugin as described here](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
-
-Elastic 3.0 still contains the `DeleteByQueryService`, but you need to install the plugin first. If you don't install it and use `DeleteByQueryService` you will most probably get a 404.
-
-An older version of this document stated the following:
-
-> Elastic 3.0 still contains the `DeleteByQueryService` but it will fail with `ErrPluginNotFound` when the plugin is not installed.
->
-> Example for Elastic 3.0 (new):
->
-> ```go
-> _, err := client.DeleteByQuery().Query(elastic.NewTermQuery("client", "1")).Do()
-> if err == elastic.ErrPluginNotFound {
-> // Delete By Query API is not available
-> }
-> ```
-
-I have decided that this is not a good way to handle the case of a missing plugin. The main reason is that with this logic, you'd always have to check if the plugin is missing in case of an error. This is not only slow, but it also puts logic into a service where it should really be just opaque and return the response of Elasticsearch.
-
-If you rely on certain plugins to be installed, you should check on startup. That's where the following two helpers come into play.
-
-## HasPlugin and SetRequiredPlugins
-
-Some of the core functionality of Elasticsearch has now been moved into plugins. E.g. the Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
-
-You need to make sure to add these plugins to your Elasticsearch installation to still be able to use the `DeleteByQueryService`. You can test this now with the `HasPlugin(name string)` helper in the client.
-
-Example for Elastic 3.0 (new):
-
-```go
-found, err := client.HasPlugin("delete-by-query")
-if err == nil && found {
- // ... Delete By Query API is available
-}
-```
-
-To simplify this process, there is now a `SetRequiredPlugins` helper that can be passed as an option func when creating a new client. If the plugin is not installed, the client wouldn't be created in the first place.
-
-```go
-// Will raise an error if the "delete-by-query" plugin is NOT installed
-client, err := elastic.NewClient(elastic.SetRequiredPlugins("delete-by-query"))
-if err != nil {
- log.Fatal(err)
-}
-```
-
-Notice that there also is a way to define [mandatory plugins](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html#_mandatory_plugins) in the Elasticsearch configuration file.
-
-## Common Query has been renamed to Common Terms Query
-
-The `CommonQuery` has been renamed to `CommonTermsQuery` to be in line with the [Java API](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_java_api_changes.html#_query_filter_refactoring).
-
-## Remove `MoreLikeThis` and `MoreLikeThisField`
-
-The More Like This API and the More Like This Field query [have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_more_like_this) and replaced with the `MoreLikeThisQuery`.
-
-## Remove Filtered Query
-
-With the merge of queries and filters, the [filtered query became deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). While it is only deprecated and therefore still available in Elasticsearch 2.0, we have decided to remove it from Elastic 3.0. Why? Because we think that when you're already forced to rewrite much of your application code, it might be a good chance to get rid of things that are deprecated as well. So you might simply replace your filtered query with a boolean query as [described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated).
-
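-A minimal sketch of rewriting a filtered query as a `bool` query with a `filter` clause (the field and value are examples):
-
-```go
-// Elastic 2.0 (old): filtered query wrapping a query and a filter.
-// Elastic 3.0 (new): bool query with must and filter clauses.
-q := elastic.NewBoolQuery().
- Must(elastic.NewMatchAllQuery()).
- Filter(elastic.NewTermQuery("tag", "important"))
-```
-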
-## Remove FuzzyLikeThis and FuzzyLikeThisField
-
-Both have been removed from Elasticsearch 2.0 as well.
-
-## Remove LimitFilter
-
-The `limit` filter is [deprecated in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_limit_literal_filter_deprecated) and becomes a no-op. Now is a good chance to remove it from your application as well. Use the `terminate_after` parameter in your search [as described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-body.html) to achieve similar effects.
-
-## Remove `_cache` and `_cache_key` from filters
-
-Both have been [removed from Elasticsearch 2.0 as well](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_filter_auto_caching).
-
-## Partial fields are gone
-
-Partial fields are [removed in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_search_changes.html#_partial_fields) in favor of [source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-source-filtering.html).
-
-## Scripting
-
-A [`Script`](https://github.com/olivere/elastic/blob/release-branch.v3/script.go) type has been added to Elastic 3.0. In Elastic 2.0, there were various places (e.g. aggregations) where you could just add the script as a string, specify the scripting language, add parameters etc. With Elastic 3.0, you should now always use the `Script` type.
-
-Example for Elastic 2.0 (old):
-
-```go
-update, err := client.Update().Index("twitter").Type("tweet").Id("1").
- Script("ctx._source.retweets += num").
- ScriptParams(map[string]interface{}{"num": 1}).
- Upsert(map[string]interface{}{"retweets": 0}).
- Do()
-```
-
-Example for Elastic 3.0 (new):
-
-```go
-update, err := client.Update().Index("twitter").Type("tweet").Id("1").
- Script(elastic.NewScript("ctx._source.retweets += num").Param("num", 1)).
- Upsert(map[string]interface{}{"retweets": 0}).
- Do()
-```
-
-## Cluster State
-
-The combination of `Metric(string)` and `Metrics(...string)` has been replaced by a single func with the signature `Metric(...string)`.
-
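-A short sketch (assuming an existing `client`):
-
-```go
-// Request only selected metrics of the cluster state.
-state, err := client.ClusterState().Metric("metadata", "nodes").Do()
-if err != nil {
- // Handle error
-}
-_ = state.ClusterName
-```
-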
-## Unexported structs in response
-
-Services generally return a typed response from a `Do` func. Those structs are exported so that they can be passed around in your own application. In Elastic 3.0, however, we changed this so that (most) sub-structs are now unexported, meaning you can only pass around the whole response, not its sub-structures. This makes it easier to restructure responses according to the Elasticsearch API. See [`ClusterStateResponse`](https://github.com/olivere/elastic/blob/release-branch.v3/cluster_state.go#L182) as an example.
-
-## Add offset to Histogram aggregation
-
-Histogram aggregations now have an [offset](https://github.com/elastic/elasticsearch/pull/9505) option.
-
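-A minimal sketch, assuming the histogram aggregation builder exposes the new option as `Offset`:
-
-```go
-// Buckets of width 50, shifted by 5: [5, 55), [55, 105), ...
-agg := elastic.NewHistogramAggregation().Field("price").Interval(50).Offset(5)
-```
-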
-## Services
-
-### REST API specification
-
-As you might know, Elasticsearch comes with a REST API specification. The specification describes the endpoints in a JSON structure.
-
-Most services in Elastic predated the REST API specification. We are in the process of bringing all these services in line with the specification. Services can be generated by `go generate` (not 100% automatic though). This is an ongoing process.
-
-This probably doesn't mean a lot to you. However, you can now be more confident that Elastic supports all features that the REST API specification describes.
-
-At the same time, the file names of the services are renamed to match the REST API specification naming.
-
-### REST API Test Suite
-
-The REST API specification of Elasticsearch comes along with a test suite that official clients typically use to test for conformance. Up until now, Elastic didn't run this test suite. However, we are in the process of setting up infrastructure and tests to match this suite as well.
-
-This process is not completed yet, though.
-
-
diff --git a/vendor/github.com/olivere/elastic/CHANGELOG-5.0.md b/vendor/github.com/olivere/elastic/CHANGELOG-5.0.md
deleted file mode 100644
index 161c6a1ce..000000000
--- a/vendor/github.com/olivere/elastic/CHANGELOG-5.0.md
+++ /dev/null
@@ -1,195 +0,0 @@
-# Changes in Elastic 5.0
-
-## Enforce context.Context in PerformRequest and Do
-
-We enforce the usage of `context.Context` everywhere you execute a request.
-You need to change all your `Do()` calls to pass a context: `Do(ctx)`.
-This enables automatic request cancelation and many other patterns.
-
-If you don't need this, simply pass `context.TODO()` or `context.Background()`.
-
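-A minimal sketch of the change (assuming an existing `client`):
-
-```go
-// Elastic 3.0 (old):
-// res, err := client.Search().Index("twitter").Do()
-
-// Elastic 5.0 (new): every Do call takes a context.Context.
-ctx := context.Background()
-res, err := client.Search().Index("twitter").Do(ctx)
-```
-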
-## Warmers removed
-
-Warmers are no longer necessary and have been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_index_apis.html#_warmers).
-
-## Optimize removed
-
-Optimize was deprecated in ES 2.0 and has been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_rest_api_changes.html#_literal__optimize_literal_endpoint_removed).
-Use [Force Merge](https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html) instead.
-
-## Missing Query removed
-
-The `missing` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-exists-query.html#_literal_missing_literal_query).
-Use `exists` query with `must_not` in `bool` query instead.
-
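-A minimal sketch of the replacement (the field name is an example):
-
-```go
-// missing("user") becomes: bool -> must_not -> exists("user")
-q := elastic.NewBoolQuery().MustNot(elastic.NewExistsQuery("user"))
-```
-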
-## And Query removed
-
-The `and` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
-Use `must` clauses in a `bool` query instead.
-
-## Not Query removed
-
-TODO Is it removed?
-
-## Or Query removed
-
-The `or` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
-Use `should` clauses in a `bool` query instead.
-
-## Filtered Query removed
-
-The `filtered` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
-Use `bool` query instead, which supports `filter` clauses too.
-
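-A minimal sketch of how the removed `and`, `or`, and `filtered` queries map onto `bool` clauses (`q1` through `q4` stand for arbitrary queries):
-
-```go
-q := elastic.NewBoolQuery().
- Must(q1, q2).   // replaces the old "and" query
- Should(q3, q4). // replaces the old "or" query
- Filter(elastic.NewTermQuery("tag", "important")) // replaces the "filtered" query
-```
-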
-## Limit Query removed
-
-The `limit` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
-Use the `terminate_after` parameter instead.
-
-## Template Query removed
-
-The `template` query has been [deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-template-query.html). You should use
-Search Templates instead.
-
-We remove it from Elastic 5.0 as the 5.0 update is already a good opportunity
-to get rid of old stuff.
-
-## `_timestamp` and `_ttl` removed
-
-Both of these fields were deprecated and are now [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_mapping_changes.html#_literal__timestamp_literal_and_literal__ttl_literal).
-
-## Search template Put/Delete API returns `acknowledged` only
-
-The response type for Put/Delete search templates has changed.
-It only returns a single `acknowledged` flag now.
-
-## Fields has been renamed to Stored Fields
-
-The `fields` parameter has been renamed to `stored_fields`.
-See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fields_literal_parameter).
-
-## Fielddatafields has been renamed to Docvaluefields
-
-The `fielddata_fields` parameter [has been renamed](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fielddata_fields_literal_parameter)
-to `docvalue_fields`.
-
-## Type exists endpoint changed
-
-The endpoint for checking whether a type exists has been changed from
-`HEAD {index}/{type}` to `HEAD {index}/_mapping/{type}`.
-See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking_50_rest_api_changes.html#_literal_head_index_type_literal_replaced_with_literal_head_index__mapping_type_literal).
-
-## Refresh parameter changed
-
-The `?refresh` parameter previously could be a boolean value. It indicated
-whether changes made by a request (e.g. by the Bulk API) should be immediately
-visible in search, or not. Using `refresh=true` had the positive effect of
-immediately seeing the changes when searching; the negative effect is that
-it is a rather big performance hit.
-
-With 5.0, you now have the choice between these 3 values.
-
-* `"true"` - Refresh immediately
-* `"false"` - Do not refresh (the default value)
-* `"wait_for"` - Wait until ES made the document visible in search
-
-See [?refresh](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-refresh.html) in the documentation.
-
-Notice that `true` and `false` (the boolean values) are no longer available
-in Elastic. You must use a string instead, with one of the above values.
-
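-A minimal sketch (assuming an existing `client`, a `ctx`, and a `tweet` document; index, type, and id are examples):
-
-```go
-// Make the document visible to search before returning, without forcing
-// an immediate refresh.
-res, err := client.Index().
- Index("twitter").
- Type("tweet").
- Id("1").
- BodyJson(tweet).
- Refresh("wait_for").
- Do(ctx)
-```
-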
-## ReindexerService removed
-
-The `ReindexerService` was a custom solution that was started in the ES 1.x era
-to automate reindexing data from one index to another, or even between clusters.
-
-ES 2.3 introduced its own [Reindex API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html)
-so we're going to remove our custom solution and ask you to use the native reindexer.
-
-The `ReindexService` is available via `client.Reindex()` (which used to point
-to the custom reindexer).
-
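-A minimal sketch using the native reindexer (assuming an existing `client` and `ctx`; index names are examples):
-
-```go
-res, err := client.Reindex().
- SourceIndex("twitter").
- DestinationIndex("new_twitter").
- Do(ctx)
-```
-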
-## Delete By Query back in core
-
-The [Delete By Query API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html)
-was moved into a plugin in 2.0. Now it's back in core with a complete rewrite based on the Bulk API.
-
-It has its own endpoint at `/_delete_by_query`.
-
-Delete By Query, Reindex, and Update By Query are very similar under the hood.
-
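-A minimal sketch (assuming an existing `client` and `ctx`; index, field, and value are examples):
-
-```go
-res, err := client.DeleteByQuery().
- Index("twitter").
- Query(elastic.NewTermQuery("user", "olivere")).
- Do(ctx)
-```
-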
-## Reindex, Delete By Query, and Update By Query response changed
-
-The response from the above APIs changed a bit. E.g. the `retries` value
-used to be an `int64` and now returns separate values for `bulk` and `search`:
-
-```
-// Old
-{
- ...
- "retries": 123,
- ...
-}
-```
-
-```
-// New
-{
- ...
- "retries": {
- "bulk": 123,
- "search": 0
- },
- ...
-}
-```
-
-## ScanService removed
-
-The `ScanService` is removed. Use the (new) `ScrollService` instead.
-
-## New ScrollService
-
-There was confusion around `ScanService` and `ScrollService` doing basically
-the same thing. One returned slices and didn't support all query details; the
-other returned one document after another and wasn't safe for concurrent use.
-So we merged the two into a new `ScrollService` that
-removes all the problems of the older services.
-
-In other words:
-If you used `ScanService`, switch to `ScrollService`.
-If you used the old `ScrollService`, you might need to fix some things but
-overall it should just work.
-
-Changes:
-- We replaced `elastic.EOS` with `io.EOF` to indicate the "end of scroll".
-
-TODO Not implemented yet
-
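-A minimal sketch of the new scrolling loop (assuming an existing `client`, a `ctx`, and the `io` package imported; index name and page size are examples):
-
-```go
-svc := client.Scroll("twitter").Query(elastic.NewMatchAllQuery()).Size(100)
-for {
- res, err := svc.Do(ctx)
- if err == io.EOF {
-  break // all documents have been retrieved
- }
- if err != nil {
-  // Handle error
-  break
- }
- for _, hit := range res.Hits.Hits {
-  _ = hit.Source // raw JSON of the document
- }
-}
-```
-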
-## Suggesters
-
-They have been [completely rewritten in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html).
-
-Some changes:
-- Suggesters no longer have an [output](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html#_simpler_completion_indexing).
-
-TODO Fix all structural changes in suggesters
-
-## Percolator
-
-Percolator has [changed considerably](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_percolator.html).
-
-Elastic 5.0 adds the new
-[Percolator Query](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html)
-which can be used in combination with the new
-[Percolator type](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/percolator.html).
-
-The Percolate service is removed from Elastic 5.0.
-
-## Remove Consistency, add WaitForActiveShards
-
-The `consistency` parameter has been removed in a lot of places, e.g. the Bulk,
-Index, Delete, Delete-by-Query, Reindex, Update, and Update-by-Query API.
-
-It has been replaced by a somewhat similar `wait_for_active_shards` parameter.
-See https://github.com/elastic/elasticsearch/pull/19454.
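-
-A sketch, assuming the Index service exposes the new parameter as `WaitForActiveShards` (the `client`, `ctx`, and `tweet` values are placeholders):
-
-```go
-// Wait until at least 2 shard copies are active before indexing.
-res, err := client.Index().
- Index("twitter").
- Type("tweet").
- Id("1").
- BodyJson(tweet).
- WaitForActiveShards("2").
- Do(ctx)
-```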
diff --git a/vendor/github.com/olivere/elastic/CHANGELOG-6.0.md b/vendor/github.com/olivere/elastic/CHANGELOG-6.0.md
deleted file mode 100644
index 277925929..000000000
--- a/vendor/github.com/olivere/elastic/CHANGELOG-6.0.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Changes from 5.0 to 6.0
-
-See [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-6.0.html).
-
-## _all removed
-
-6.0 has removed support for the `_all` field.
-
-## Boolean values coerced
-
-Only use `true` or `false` for boolean values, not `0` or `1` or `on` or `off`.
-
-## Single Type Indices
-
-Notice that 6.0 and future versions default to single-type indices, i.e. you may not use multiple types when e.g. creating an index with a mapping.
-
-See [here for details](https://www.elastic.co/guide/en/elasticsearch/reference/6.x/removal-of-types.html#_what_are_mapping_types).
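-
-A minimal sketch of creating an index with a single mapping type (assuming a connected `client` and `ctx`; index name and mapping are placeholders):
-
-```go
-// One mapping type ("doc") per index. Illustrative sketch only.
-mapping := `{
-	"mappings": {
-		"doc": {
-			"properties": {
-				"message": { "type": "text" }
-			}
-		}
-	}
-}`
-_, err := client.CreateIndex("tweets").BodyString(mapping).Do(ctx)
-if err != nil {
-	// Handle the error.
-}
-```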
-
diff --git a/vendor/github.com/olivere/elastic/CODE_OF_CONDUCT.md b/vendor/github.com/olivere/elastic/CODE_OF_CONDUCT.md
deleted file mode 100644
index acefecee5..000000000
--- a/vendor/github.com/olivere/elastic/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at oliver@eilhard.net. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
-
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/olivere/elastic/CONTRIBUTING.md b/vendor/github.com/olivere/elastic/CONTRIBUTING.md
deleted file mode 100644
index 4fbc79dd0..000000000
--- a/vendor/github.com/olivere/elastic/CONTRIBUTING.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# How to contribute
-
-Elastic is an open-source project and we are looking forward to each
-contribution.
-
-Notice that while the [official Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) is rather good, it is a high-level
-overview of the features of Elasticsearch. Elastic, however, tries to resemble
-the Java API of Elasticsearch, which you can find [on GitHub](https://github.com/elastic/elasticsearch).
-
-This explains why you might think that some options are strange or missing
-in Elastic, while often they're just different. Please check the Java API first.
-
-Having said that: Elasticsearch is moving fast and it is very likely
-that we missed some features or changes. Feel free to fix that.
-
-## Your Pull Request
-
-To make it easy to review and understand your changes, please keep the
-following things in mind before submitting your pull request:
-
-* Did you compare the existing implementation with the Java API?
-* Please work on the latest possible state of `olivere/elastic`.
- Use `release-branch.v2` for targeting Elasticsearch 1.x and
- `release-branch.v3` for targeting 2.x.
-* Create a branch dedicated to your change.
-* If possible, write a test case which confirms your change.
-* Make sure your changes and your tests work with all recent versions of
- Elasticsearch. We currently support Elasticsearch 1.7.x in the
- release-branch.v2 and Elasticsearch 2.x in the release-branch.v3.
-* Test your changes before creating a pull request (`go test ./...`).
-* Don't mix several features or bug fixes in one pull request.
-* Create a meaningful commit message.
-* Explain your change, e.g. provide a link to the issue you are fixing and
-  possibly a link to the Elasticsearch documentation and/or source code.
-* Format your source with `go fmt`.
-
-## Additional Resources
-
-* [GitHub documentation](http://help.github.com/)
-* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)
diff --git a/vendor/github.com/olivere/elastic/CONTRIBUTORS b/vendor/github.com/olivere/elastic/CONTRIBUTORS
deleted file mode 100644
index ba06dac29..000000000
--- a/vendor/github.com/olivere/elastic/CONTRIBUTORS
+++ /dev/null
@@ -1,128 +0,0 @@
-# This is a list of people who have contributed code
-# to the Elastic repository.
-#
-# It is just my small "thank you" to all those that helped
-# making Elastic what it is.
-#
-# Please keep this list sorted.
-
-0x6875790d0a [@huydx](https://github.com/huydx)
-Adam Alix [@adamalix](https://github.com/adamalix)
-Adam Weiner [@adamweiner](https://github.com/adamweiner)
-Adrian Lungu [@AdrianLungu](https://github.com/AdrianLungu)
-alehano [@alehano](https://github.com/alehano)
-Alex [@akotlar](https://github.com/akotlar)
-Alexander Sack [@asac](https://github.com/asac)
-Alexandre Olivier [@aliphen](https://github.com/aliphen)
-Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
-AndreKR [@AndreKR](https://github.com/AndreKR)
-André Bierlein [@ligustah](https://github.com/ligustah)
-Andrew Dunham [@andrew-d](https://github.com/andrew-d)
-Andrew Gaul [@andrewgaul](https://github.com/andrewgaul)
-Andy Walker [@alaska](https://github.com/alaska)
-Arquivei [@arquivei](https://github.com/arquivei)
-arthurgustin [@arthurgustin](https://github.com/arthurgustin)
-Benjamin Fernandes [@LotharSee](https://github.com/LotharSee)
-Benjamin Zarzycki [@kf6nux](https://github.com/kf6nux)
-Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va)
-Brady Love [@bradylove](https://github.com/bradylove)
-Bryan Conklin [@bmconklin](https://github.com/bmconklin)
-Bruce Zhou [@brucez-isell](https://github.com/brucez-isell)
-cforbes [@cforbes](https://github.com/cforbes)
-Chris M [@tebriel](https://github.com/tebriel)
-Chris Rice [@donutmonger](https://github.com/donutmonger)
-Claudiu Olteanu [@claudiuolteanu](https://github.com/claudiuolteanu)
-Christophe Courtaut [@kri5](https://github.com/kri5)
-Connor Peet [@connor4312](https://github.com/connor4312)
-Conrad Pankoff [@deoxxa](https://github.com/deoxxa)
-Corey Scott [@corsc](https://github.com/corsc)
-Daniel Barrett [@shendaras](https://github.com/shendaras)
-Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath)
-Daniel Imfeld [@dimfeld](https://github.com/dimfeld)
-Dwayne Schultz [@myshkin5](https://github.com/myshkin5)
-Ellison Leão [@ellisonleao](https://github.com/ellisonleao)
-Erwin [@eticzon](https://github.com/eticzon)
-Eugene Egorov [@EugeneEgorov](https://github.com/EugeneEgorov)
-Evan Shaw [@edsrzf](https://github.com/edsrzf)
-Fanfan [@wenpos](https://github.com/wenpos)
-Faolan C-P [@fcheslack](https://github.com/fcheslack)
-Filip Tepper [@filiptepper](https://github.com/filiptepper)
-Gaylord Aulke [@blafasel42](https://github.com/blafasel42)
-Gerhard Häring [@ghaering](https://github.com/ghaering)
-Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos)
-Guillaume J. Charmes [@creack](https://github.com/creack)
-Guiseppe [@gm42](https://github.com/gm42)
-Han Yu [@MoonighT](https://github.com/MoonighT)
-Harmen [@alicebob](https://github.com/alicebob)
-Harrison Wright [@wright8191](https://github.com/wright8191)
-Henry Clifford [@hcliff](https://github.com/hcliff)
-Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy)
-initialcontext [@initialcontext](https://github.com/initialcontext)
-Isaac Saldana [@isaldana](https://github.com/isaldana)
-Jack Lindamood [@cep21](https://github.com/cep21)
-Jacob [@jdelgad](https://github.com/jdelgad)
-Jayme Rotsaert [@jrots](https://github.com/jrots)
-Jeremy Canady [@jrmycanady](https://github.com/jrmycanady)
-Jim Berlage [@jimberlage](https://github.com/jimberlage)
-Joe Buck [@four2five](https://github.com/four2five)
-John Barker [@j16r](https://github.com/j16r)
-John Goodall [@jgoodall](https://github.com/jgoodall)
-John Stanford [@jxstanford](https://github.com/jxstanford)
-Jonas Groenaas Drange [@semafor](https://github.com/semafor)
-Josh Chorlton [@jchorl](https://github.com/jchorl)
-jun [@coseyo](https://github.com/coseyo)
-Junpei Tsuji [@jun06t](https://github.com/jun06t)
-kartlee [@kartlee](https://github.com/kartlee)
-Keith Hatton [@khatton-ft](https://github.com/khatton-ft)
-kel [@liketic](https://github.com/liketic)
-Kenta SUZUKI [@suzuken](https://github.com/suzuken)
-Kevin Mulvey [@kmulvey](https://github.com/kmulvey)
-Kyle Brandt [@kylebrandt](https://github.com/kylebrandt)
-Leandro Piccilli [@lpic10](https://github.com/lpic10)
-M. Zulfa Achsani [@misterciput](https://github.com/misterciput)
-Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh)
-Mara Kim [@autochthe](https://github.com/autochthe)
-Marcy Buccellato [@marcybuccellato](https://github.com/marcybuccellato)
-Mark Costello [@mcos](https://github.com/mcos)
-Martin Häger [@protomouse](https://github.com/protomouse)
-Medhi Bechina [@mdzor](https://github.com/mdzor)
-mnpritula [@mnpritula](https://github.com/mnpritula)
-mosa [@mosasiru](https://github.com/mosasiru)
-naimulhaider [@naimulhaider](https://github.com/naimulhaider)
-Naoya Yoshizawa [@azihsoyn](https://github.com/azihsoyn)
-navins [@ishare](https://github.com/ishare)
-Naoya Tsutsumi [@tutuming](https://github.com/tutuming)
-Nicholas Wolff [@nwolff](https://github.com/nwolff)
-Nick K [@utrack](https://github.com/utrack)
-Nick Whyte [@nickw444](https://github.com/nickw444)
-Nicolae Vartolomei [@nvartolomei](https://github.com/nvartolomei)
-Orne Brocaar [@brocaar](https://github.com/brocaar)
-Paul [@eyeamera](https://github.com/eyeamera)
-Pete C [@peteclark-ft](https://github.com/peteclark-ft)
-Radoslaw Wesolowski [r--w](https://github.com/r--w)
-Roman Colohanin [@zuzmic](https://github.com/zuzmic)
-Ryan Schmukler [@rschmukler](https://github.com/rschmukler)
-Ryan Wynn [@rwynn](https://github.com/rwynn)
-Sacheendra talluri [@sacheendra](https://github.com/sacheendra)
-Sean DuBois [@Sean-Der](https://github.com/Sean-Der)
-Shalin LK [@shalinlk](https://github.com/shalinlk)
-singham [@zhaochenxiao90](https://github.com/zhaochenxiao90)
-Stephen Kubovic [@stephenkubovic](https://github.com/stephenkubovic)
-Stuart Warren [@Woz](https://github.com/stuart-warren)
-Sulaiman [@salajlan](https://github.com/salajlan)
-Sundar [@sundarv85](https://github.com/sundarv85)
-Swarlston [@Swarlston](https://github.com/Swarlston)
-Take [ww24](https://github.com/ww24)
-Tetsuya Morimoto [@t2y](https://github.com/t2y)
-TimeEmit [@TimeEmit](https://github.com/timeemit)
-TusharM [@tusharm](https://github.com/tusharm)
-wangtuo [@wangtuo](https://github.com/wangtuo)
-Wédney Yuri [@wedneyyuri](https://github.com/wedneyyuri)
-wolfkdy [@wolfkdy](https://github.com/wolfkdy)
-Wyndham Blanton [@wyndhblb](https://github.com/wyndhblb)
-Yarden Bar [@ayashjorden](https://github.com/ayashjorden)
-zakthomas [@zakthomas](https://github.com/zakthomas)
-Yuya Kusakabe [@higebu](https://github.com/higebu)
-Zach [@snowzach](https://github.com/snowzach)
-zhangxin [@visaxin](https://github.com/visaxin)
-@林 [@zplzpl](https://github.com/zplzpl)
diff --git a/vendor/github.com/olivere/elastic/ISSUE_TEMPLATE.md b/vendor/github.com/olivere/elastic/ISSUE_TEMPLATE.md
deleted file mode 100644
index 88d66cc83..000000000
--- a/vendor/github.com/olivere/elastic/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,18 +0,0 @@
-Please use the following questions as a guideline to help me answer
-your issue/question without further inquiry. Thank you.
-
-### Which version of Elastic are you using?
-
-[ ] elastic.v2 (for Elasticsearch 1.x)
-[ ] elastic.v3 (for Elasticsearch 2.x)
-[ ] elastic.v5 (for Elasticsearch 5.x)
-[ ] elastic.v6 (for Elasticsearch 6.x)
-
-### Please describe the expected behavior
-
-
-### Please describe the actual behavior
-
-
-### Any steps to reproduce the behavior?
-
diff --git a/vendor/github.com/olivere/elastic/LICENSE b/vendor/github.com/olivere/elastic/LICENSE
deleted file mode 100644
index 8b22cdb60..000000000
--- a/vendor/github.com/olivere/elastic/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-Copyright © 2012-2015 Oliver Eilhard
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the “Software”), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
diff --git a/vendor/github.com/olivere/elastic/README.md b/vendor/github.com/olivere/elastic/README.md
deleted file mode 100644
index d0cdd7821..000000000
--- a/vendor/github.com/olivere/elastic/README.md
+++ /dev/null
@@ -1,393 +0,0 @@
-# Elastic
-
-**This is a development branch that is actively being worked on. DO NOT USE IN PRODUCTION!**
-
-Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
-[Go](http://www.golang.org/) programming language.
-
-[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v6)](https://travis-ci.org/olivere/elastic)
-[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/github.com/olivere/elastic)
-[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
-
-See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
-
-
-## Releases
-
-**The release branches (e.g. [`release-branch.v6`](https://github.com/olivere/elastic/tree/release-branch.v6))
-are actively being worked on and can break at any time.
-If you want to use stable versions of Elastic, please use a dependency manager like [dep](https://github.com/golang/dep).**
-
-Here's the version matrix:
-
-Elasticsearch version | Elastic version | Package URL | Remarks |
-----------------------|------------------|-------------|---------|
-6.x | 6.0 | [`github.com/olivere/elastic`](https://github.com/olivere/elastic) ([source](https://github.com/olivere/elastic/tree/release-branch.v6) [doc](http://godoc.org/github.com/olivere/elastic)) | Use a dependency manager (see below).
-5.x | 5.0 | [`gopkg.in/olivere/elastic.v5`](https://gopkg.in/olivere/elastic.v5) ([source](https://github.com/olivere/elastic/tree/release-branch.v5) [doc](http://godoc.org/gopkg.in/olivere/elastic.v5)) | Actively maintained.
-2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3)) | Deprecated. Please update.
-1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2)) | Deprecated. Please update.
-0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1)) | Deprecated. Please update.
-
-**Example:**
-
-You have installed Elasticsearch 6.0.0 and want to use Elastic.
-As listed above, you should use Elastic 6.0.
-
-To use the required version of Elastic in your application, it is strongly
-advised to use a tool like
-[dep](https://github.com/golang/dep)
-or
-[Glide](https://glide.sh/)
-to manage that dependency. Make sure to use a version such as `^6.0.0`.
-
-To use Elastic, simply import:
-
-```go
-import "github.com/olivere/elastic"
-```
-
-### Elastic 6.0
-
-Elastic 6.0 targets Elasticsearch 6.x which was [released on 14th November 2017](https://www.elastic.co/blog/elasticsearch-6-0-0-released).
-
-Notice that there are a lot of [breaking changes in Elasticsearch 6.0](https://www.elastic.co/guide/en/elasticsearch/reference/6.0/breaking-changes-6.0.html)
-and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v6/CHANGELOG-6.0.md)
-as we did in the transition from earlier versions of Elastic.
-
-### Elastic 5.0
-
-Elastic 5.0 targets Elasticsearch 5.0.0 and later. Elasticsearch 5.0.0 was
-[released on 26th October 2016](https://www.elastic.co/blog/elasticsearch-5-0-0-released).
-
-Notice that there are a lot of [breaking changes in Elasticsearch 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking-changes-5.0.html)
-and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v5/CHANGELOG-5.0.md)
-as we did in the transition from Elastic 2.0 (for Elasticsearch 1.x) to Elastic 3.0 (for Elasticsearch 2.x).
-
-Furthermore, the jump in version numbers will give us a chance to be in sync with the Elastic Stack.
-
-### Elastic 3.0
-
-Elastic 3.0 targets Elasticsearch 2.x and is published via [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3).
-
-Elastic 3.0 will only get critical bug fixes. You should update to a recent version.
-
-### Elastic 2.0
-
-Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).
-
-Elastic 2.0 will only get critical bug fixes. You should update to a recent version.
-
-### Elastic 1.0
-
-Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
-to a recent version.
-
-However, if you cannot update for some reason, don't worry. Version 1.0 is
-still available. All you need to do is go-get it and change your import path
-as described above.
-
-
-## Status
-
-We have been using Elastic in production since 2012. Elastic is stable, but the API
-changes now and then. We strive for API compatibility.
-However, Elasticsearch sometimes introduces [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes.html)
-and we sometimes have to adapt.
-
-Having said that, there have been no big API changes that required you
-to rewrite large parts of your application. More often than not it's a matter of
-renaming APIs and adding/removing features so that Elastic stays in sync with Elasticsearch.
-
-Elastic has been used in production with the following Elasticsearch versions:
-0.90, 1.0-1.7, and 2.0-2.4.1. Furthermore, we use [Travis CI](https://travis-ci.org/)
-to test Elastic with the most recent versions of Elasticsearch and Go.
-See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
-file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
-for the results.
-
-Elasticsearch has quite a few features. Most of them are implemented
-by Elastic. I add features and APIs as required. It's straightforward
-to implement missing pieces. I'm accepting pull requests :-)
-
-Having said that, I hope you find the project useful.
-
-
-## Getting Started
-
-The first thing to do is create a [Client](https://github.com/olivere/elastic/blob/master/client.go).
-The client connects to Elasticsearch on `http://127.0.0.1:9200` by default.
-
-You typically create one client for your app. Here's a complete example of
-creating a client, creating an index, adding a document, executing a search etc.
-
-An example is available [here](https://olivere.github.io/elastic/).
-
-Here's a [link to a complete working example for v3](https://gist.github.com/olivere/114347ff9d9cfdca7bdc0ecea8b82263).
-
-See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
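-
-For a quick impression, here is a minimal, self-contained sketch (error handling shortened; index name, document and query are placeholders):
-
-```go
-package main
-
-import (
-	"context"
-	"fmt"
-	"log"
-
-	"github.com/olivere/elastic"
-)
-
-func main() {
-	ctx := context.Background()
-
-	// Connect to Elasticsearch on http://127.0.0.1:9200 (the default).
-	client, err := elastic.NewClient()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Create an index (illustrative; add a mapping as needed).
-	if _, err := client.CreateIndex("tweets").Do(ctx); err != nil {
-		log.Fatal(err)
-	}
-
-	// Index a document; "wait_for" makes it visible to the search below.
-	doc := map[string]interface{}{"user": "olivere", "message": "Hello Elastic"}
-	_, err = client.Index().
-		Index("tweets").Type("doc").Id("1").
-		BodyJson(doc).
-		Refresh("wait_for").
-		Do(ctx)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Search for it.
-	res, err := client.Search("tweets").
-		Query(elastic.NewMatchQuery("message", "hello")).
-		Do(ctx)
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Printf("found %d documents\n", res.TotalHits())
-}
-```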
-
-
-## API Status
-
-### Document APIs
-
-- [x] Index API
-- [x] Get API
-- [x] Delete API
-- [x] Delete By Query API
-- [x] Update API
-- [x] Update By Query API
-- [x] Multi Get API
-- [x] Bulk API
-- [x] Reindex API
-- [x] Term Vectors
-- [x] Multi termvectors API
-
-### Search APIs
-
-- [x] Search
-- [x] Search Template
-- [ ] Multi Search Template
-- [ ] Search Shards API
-- [x] Suggesters
- - [x] Term Suggester
- - [x] Phrase Suggester
- - [x] Completion Suggester
- - [x] Context Suggester
-- [x] Multi Search API
-- [x] Count API
-- [ ] Validate API
-- [x] Explain API
-- [x] Profile API
-- [x] Field Capabilities API
-
-### Aggregations
-
-- Metrics Aggregations
- - [x] Avg
- - [x] Cardinality
- - [x] Extended Stats
- - [x] Geo Bounds
- - [ ] Geo Centroid
- - [x] Max
- - [x] Min
- - [x] Percentiles
- - [x] Percentile Ranks
- - [ ] Scripted Metric
- - [x] Stats
- - [x] Sum
- - [x] Top Hits
- - [x] Value Count
-- Bucket Aggregations
- - [ ] Adjacency Matrix
- - [x] Children
- - [x] Date Histogram
- - [x] Date Range
- - [ ] Diversified Sampler
- - [x] Filter
- - [x] Filters
- - [x] Geo Distance
- - [ ] GeoHash Grid
- - [x] Global
- - [x] Histogram
- - [x] IP Range
- - [x] Missing
- - [x] Nested
- - [x] Range
- - [x] Reverse Nested
- - [x] Sampler
- - [x] Significant Terms
- - [x] Significant Text
- - [x] Terms
- - [x] Composite
-- Pipeline Aggregations
- - [x] Avg Bucket
- - [x] Derivative
- - [x] Max Bucket
- - [x] Min Bucket
- - [x] Sum Bucket
- - [x] Stats Bucket
- - [ ] Extended Stats Bucket
- - [x] Percentiles Bucket
- - [x] Moving Average
- - [x] Cumulative Sum
- - [x] Bucket Script
- - [x] Bucket Selector
- - [ ] Bucket Sort
- - [x] Serial Differencing
-- [x] Matrix Aggregations
- - [x] Matrix Stats
-- [x] Aggregation Metadata
-
-### Indices APIs
-
-- [x] Create Index
-- [x] Delete Index
-- [x] Get Index
-- [x] Indices Exists
-- [x] Open / Close Index
-- [x] Shrink Index
-- [x] Rollover Index
-- [x] Put Mapping
-- [x] Get Mapping
-- [x] Get Field Mapping
-- [x] Types Exists
-- [x] Index Aliases
-- [x] Update Indices Settings
-- [x] Get Settings
-- [x] Analyze
- - [x] Explain Analyze
-- [x] Index Templates
-- [x] Indices Stats
-- [x] Indices Segments
-- [ ] Indices Recovery
-- [ ] Indices Shard Stores
-- [ ] Clear Cache
-- [x] Flush
- - [x] Synced Flush
-- [x] Refresh
-- [x] Force Merge
-
-### cat APIs
-
-The cat APIs are not implemented as of now. We think they are better suited for operating with Elasticsearch on the command line.
-
-- [ ] cat aliases
-- [ ] cat allocation
-- [ ] cat count
-- [ ] cat fielddata
-- [ ] cat health
-- [ ] cat indices
-- [ ] cat master
-- [ ] cat nodeattrs
-- [ ] cat nodes
-- [ ] cat pending tasks
-- [ ] cat plugins
-- [ ] cat recovery
-- [ ] cat repositories
-- [ ] cat thread pool
-- [ ] cat shards
-- [ ] cat segments
-- [ ] cat snapshots
-- [ ] cat templates
-
-### Cluster APIs
-
-- [x] Cluster Health
-- [x] Cluster State
-- [x] Cluster Stats
-- [ ] Pending Cluster Tasks
-- [ ] Cluster Reroute
-- [ ] Cluster Update Settings
-- [x] Nodes Stats
-- [x] Nodes Info
-- [ ] Nodes Feature Usage
-- [ ] Remote Cluster Info
-- [x] Task Management API
-- [ ] Nodes hot_threads
-- [ ] Cluster Allocation Explain API
-
-### Query DSL
-
-- [x] Match All Query
-- [x] Inner hits
-- Full text queries
- - [x] Match Query
- - [x] Match Phrase Query
- - [x] Match Phrase Prefix Query
- - [x] Multi Match Query
- - [x] Common Terms Query
- - [x] Query String Query
- - [x] Simple Query String Query
-- Term level queries
- - [x] Term Query
- - [x] Terms Query
- - [x] Terms Set Query
- - [x] Range Query
- - [x] Exists Query
- - [x] Prefix Query
- - [x] Wildcard Query
- - [x] Regexp Query
- - [x] Fuzzy Query
- - [x] Type Query
- - [x] Ids Query
-- Compound queries
- - [x] Constant Score Query
- - [x] Bool Query
- - [x] Dis Max Query
- - [x] Function Score Query
- - [x] Boosting Query
-- Joining queries
- - [x] Nested Query
- - [x] Has Child Query
- - [x] Has Parent Query
- - [x] Parent Id Query
-- Geo queries
- - [ ] GeoShape Query
- - [x] Geo Bounding Box Query
- - [x] Geo Distance Query
- - [x] Geo Polygon Query
-- Specialized queries
- - [x] More Like This Query
- - [x] Script Query
- - [x] Percolate Query
-- Span queries
- - [ ] Span Term Query
- - [ ] Span Multi Term Query
- - [ ] Span First Query
- - [ ] Span Near Query
- - [ ] Span Or Query
- - [ ] Span Not Query
- - [ ] Span Containing Query
- - [ ] Span Within Query
- - [ ] Span Field Masking Query
-- [ ] Minimum Should Match
-- [ ] Multi Term Query Rewrite
-
-### Modules
-
-- Snapshot and Restore
- - [x] Repositories
- - [x] Snapshot
- - [ ] Restore
- - [ ] Snapshot status
- - [ ] Monitoring snapshot/restore status
- - [ ] Stopping currently running snapshot and restore
-
-### Sorting
-
-- [x] Sort by score
-- [x] Sort by field
-- [x] Sort by geo distance
-- [x] Sort by script
-- [x] Sort by doc
-
-### Scrolling
-
-Scrolling is supported via a `ScrollService`. It supports an iterator-like interface.
-The `ClearScroll` API is implemented as well.
-
-A pattern for [efficiently scrolling in parallel](https://github.com/olivere/elastic/wiki/ScrollParallel)
-is described in the [Wiki](https://github.com/olivere/elastic/wiki).
-
-## How to contribute
-
-Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
-
-## Credits
-
-Thanks a lot to the great folks working hard on
-[Elasticsearch](https://www.elastic.co/products/elasticsearch)
-and
-[Go](https://golang.org/).
-
-Elastic uses portions of the
-[uritemplates](https://github.com/jtacoma/uritemplates) library
-by Joshua Tacoma,
-[backoff](https://github.com/cenkalti/backoff) by Cenk Altı and
-[leaktest](https://github.com/fortytw2/leaktest) by Ian Chiles.
-
-## LICENSE
-
-MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
-or the LICENSE file provided in the repository for details.
diff --git a/vendor/github.com/olivere/elastic/acknowledged_response.go b/vendor/github.com/olivere/elastic/acknowledged_response.go
deleted file mode 100644
index 2045ab85e..000000000
--- a/vendor/github.com/olivere/elastic/acknowledged_response.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// AcknowledgedResponse is returned from various APIs. It simply indicates
-// whether the operation is ack'd or not.
-type AcknowledgedResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
diff --git a/vendor/github.com/olivere/elastic/backoff.go b/vendor/github.com/olivere/elastic/backoff.go
deleted file mode 100644
index 736959f6d..000000000
--- a/vendor/github.com/olivere/elastic/backoff.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "math"
- "math/rand"
- "sync"
- "time"
-)
-
-// BackoffFunc specifies the signature of a function that returns the
-// time to wait before the next call to a resource. To stop retrying
-// return false in the 2nd return value.
-type BackoffFunc func(retry int) (time.Duration, bool)
-
-// Backoff allows callers to implement their own Backoff strategy.
-type Backoff interface {
- // Next implements a BackoffFunc.
- Next(retry int) (time.Duration, bool)
-}
-
-// -- ZeroBackoff --
-
-// ZeroBackoff is a fixed backoff policy whose backoff time is always zero,
-// meaning that the operation is retried immediately without waiting,
-// indefinitely.
-type ZeroBackoff struct{}
-
-// Next implements BackoffFunc for ZeroBackoff.
-func (b ZeroBackoff) Next(retry int) (time.Duration, bool) {
- return 0, true
-}
-
-// -- StopBackoff --
-
-// StopBackoff is a fixed backoff policy that always returns false for
-// Next(), meaning that the operation should never be retried.
-type StopBackoff struct{}
-
-// Next implements BackoffFunc for StopBackoff.
-func (b StopBackoff) Next(retry int) (time.Duration, bool) {
- return 0, false
-}
-
-// -- ConstantBackoff --
-
-// ConstantBackoff is a backoff policy that always returns the same delay.
-type ConstantBackoff struct {
- interval time.Duration
-}
-
-// NewConstantBackoff returns a new ConstantBackoff.
-func NewConstantBackoff(interval time.Duration) *ConstantBackoff {
- return &ConstantBackoff{interval: interval}
-}
-
-// Next implements BackoffFunc for ConstantBackoff.
-func (b *ConstantBackoff) Next(retry int) (time.Duration, bool) {
- return b.interval, true
-}
-
-// -- Exponential --
-
-// ExponentialBackoff implements the simple exponential backoff described by
-// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html.
-type ExponentialBackoff struct {
- t float64 // initial timeout (in msec)
- f float64 // exponential factor (e.g. 2)
- m float64 // maximum timeout (in msec)
-}
-
-// NewExponentialBackoff returns an ExponentialBackoff backoff policy.
-// Use initialTimeout to set the first/minimal interval
-// and maxTimeout to set the maximum wait interval.
-func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff {
- return &ExponentialBackoff{
- t: float64(int64(initialTimeout / time.Millisecond)),
- f: 2.0,
- m: float64(int64(maxTimeout / time.Millisecond)),
- }
-}
-
-// Next implements BackoffFunc for ExponentialBackoff.
-func (b *ExponentialBackoff) Next(retry int) (time.Duration, bool) {
- r := 1.0 + rand.Float64() // random number in [1..2]
- m := math.Min(r*b.t*math.Pow(b.f, float64(retry)), b.m)
- if m >= b.m {
- return 0, false
- }
- d := time.Duration(int64(m)) * time.Millisecond
- return d, true
-}
-
-// -- Simple Backoff --
-
-// SimpleBackoff takes a list of fixed values for backoff intervals.
-// Each call to Next returns the value at the given retry index from that fixed list.
-// Once the list is exhausted, Next signals that no further retries should be made.
-// The values are optionally "jittered" (off by default).
-type SimpleBackoff struct {
- sync.Mutex
- ticks []int
- jitter bool
-}
-
-// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified
-// list of fixed intervals in milliseconds.
-func NewSimpleBackoff(ticks ...int) *SimpleBackoff {
- return &SimpleBackoff{
- ticks: ticks,
- jitter: false,
- }
-}
-
-// Jitter enables or disables jittering values.
-func (b *SimpleBackoff) Jitter(flag bool) *SimpleBackoff {
- b.Lock()
- b.jitter = flag
- b.Unlock()
- return b
-}
-
-// jitter randomizes the interval to return a value of [0.5*millis .. 1.5*millis].
-func jitter(millis int) int {
- if millis <= 0 {
- return 0
- }
- return millis/2 + rand.Intn(millis)
-}
-
-// Next implements BackoffFunc for SimpleBackoff.
-func (b *SimpleBackoff) Next(retry int) (time.Duration, bool) {
- b.Lock()
- defer b.Unlock()
-
- if retry >= len(b.ticks) {
- return 0, false
- }
-
- ms := b.ticks[retry]
- if b.jitter {
- ms = jitter(ms)
- }
- return time.Duration(ms) * time.Millisecond, true
-}
diff --git a/vendor/github.com/olivere/elastic/backoff_test.go b/vendor/github.com/olivere/elastic/backoff_test.go
deleted file mode 100644
index eae168a12..000000000
--- a/vendor/github.com/olivere/elastic/backoff_test.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package elastic
-
-import (
- "math/rand"
- "testing"
- "time"
-)
-
-func TestZeroBackoff(t *testing.T) {
- b := ZeroBackoff{}
- _, ok := b.Next(0)
- if !ok {
- t.Fatalf("expected %v, got %v", true, ok)
- }
-}
-
-func TestStopBackoff(t *testing.T) {
- b := StopBackoff{}
- _, ok := b.Next(0)
- if ok {
- t.Fatalf("expected %v, got %v", false, ok)
- }
-}
-
-func TestConstantBackoff(t *testing.T) {
- b := NewConstantBackoff(time.Second)
- d, ok := b.Next(0)
- if !ok {
- t.Fatalf("expected %v, got %v", true, ok)
- }
- if d != time.Second {
- t.Fatalf("expected %v, got %v", time.Second, d)
- }
-}
-
-func TestSimpleBackoff(t *testing.T) {
- var tests = []struct {
- Duration time.Duration
- Continue bool
- }{
- // #0
- {
- Duration: 1 * time.Millisecond,
- Continue: true,
- },
- // #1
- {
- Duration: 2 * time.Millisecond,
- Continue: true,
- },
- // #2
- {
- Duration: 7 * time.Millisecond,
- Continue: true,
- },
- // #3
- {
- Duration: 0,
- Continue: false,
- },
- // #4
- {
- Duration: 0,
- Continue: false,
- },
- }
-
- b := NewSimpleBackoff(1, 2, 7)
-
- for i, tt := range tests {
- d, ok := b.Next(i)
- if got, want := ok, tt.Continue; got != want {
- t.Fatalf("#%d: expected %v, got %v", i, want, got)
- }
- if got, want := d, tt.Duration; got != want {
- t.Fatalf("#%d: expected %v, got %v", i, want, got)
- }
- }
-}
-
-func TestExponentialBackoff(t *testing.T) {
- rand.Seed(time.Now().UnixNano())
-
- min := time.Duration(8) * time.Millisecond
- max := time.Duration(256) * time.Millisecond
- b := NewExponentialBackoff(min, max)
-
- between := func(value time.Duration, a, b int) bool {
- x := int(value / time.Millisecond)
- return a <= x && x <= b
- }
-
- got, ok := b.Next(0)
- if !ok {
- t.Fatalf("expected %v, got %v", true, ok)
- }
- if !between(got, 8, 256) {
- t.Errorf("expected [%v..%v], got %v", 8, 256, got)
- }
-
- got, ok = b.Next(1)
- if !ok {
- t.Fatalf("expected %v, got %v", true, ok)
- }
- if !between(got, 8, 256) {
- t.Errorf("expected [%v..%v], got %v", 8, 256, got)
- }
-
- got, ok = b.Next(2)
- if !ok {
- t.Fatalf("expected %v, got %v", true, ok)
- }
- if !between(got, 8, 256) {
- t.Errorf("expected [%v..%v], got %v", 8, 256, got)
- }
-
- got, ok = b.Next(3)
- if !ok {
- t.Fatalf("expected %v, got %v", true, ok)
- }
- if !between(got, 8, 256) {
- t.Errorf("expected [%v..%v], got %v", 8, 256, got)
- }
-
- got, ok = b.Next(4)
- if !ok {
- t.Fatalf("expected %v, got %v", true, ok)
- }
- if !between(got, 8, 256) {
- t.Errorf("expected [%v..%v], got %v", 8, 256, got)
- }
-
- if _, ok := b.Next(5); ok {
- t.Fatalf("expected %v, got %v", false, ok)
- }
-
- if _, ok = b.Next(6); ok {
- t.Fatalf("expected %v, got %v", false, ok)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/bulk.go b/vendor/github.com/olivere/elastic/bulk.go
deleted file mode 100644
index f4228294f..000000000
--- a/vendor/github.com/olivere/elastic/bulk.go
+++ /dev/null
@@ -1,417 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// BulkService allows for batching bulk requests and sending them to
-// Elasticsearch in one roundtrip. Use the Add method with BulkIndexRequest,
-// BulkUpdateRequest, and BulkDeleteRequest to add bulk requests to a batch,
-// then use Do to send them to Elasticsearch.
-//
-// BulkService will be reset after each Do call. In other words, you can
-// reuse BulkService to send many batches. You do not have to create a new
-// BulkService for each batch.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
-// for more details.
-type BulkService struct {
- client *Client
- retrier Retrier
-
- index string
- typ string
- requests []BulkableRequest
- pipeline string
- timeout string
- refresh string
- routing string
- waitForActiveShards string
- pretty bool
-
- // estimated bulk size in bytes, up to the request index sizeInBytesCursor
- sizeInBytes int64
- sizeInBytesCursor int
-}
-
-// NewBulkService initializes a new BulkService.
-func NewBulkService(client *Client) *BulkService {
- builder := &BulkService{
- client: client,
- }
- return builder
-}
-
-func (s *BulkService) reset() {
- s.requests = make([]BulkableRequest, 0)
- s.sizeInBytes = 0
- s.sizeInBytesCursor = 0
-}
-
-// Retrier allows setting specific retry logic for this BulkService.
-// If not specified, it will use the client's default retrier.
-func (s *BulkService) Retrier(retrier Retrier) *BulkService {
- s.retrier = retrier
- return s
-}
-
-// Index specifies the index to use for all batches. You may also leave
-// this blank and specify the index in the individual bulk requests.
-func (s *BulkService) Index(index string) *BulkService {
- s.index = index
- return s
-}
-
-// Type specifies the type to use for all batches. You may also leave
-// this blank and specify the type in the individual bulk requests.
-func (s *BulkService) Type(typ string) *BulkService {
- s.typ = typ
- return s
-}
-
-// Timeout is a global timeout for processing bulk requests. This is a
-// server-side timeout, i.e. it tells Elasticsearch the time after which
-// it should stop processing.
-func (s *BulkService) Timeout(timeout string) *BulkService {
- s.timeout = timeout
- return s
-}
-
-// Refresh controls when changes made by this request are made visible
-// to search. The allowed values are: "true" (refresh the relevant
-// primary and replica shards immediately), "wait_for" (wait for the
-// changes to be made visible by a refresh before applying), or "false"
-// (no refresh related actions).
-func (s *BulkService) Refresh(refresh string) *BulkService {
- s.refresh = refresh
- return s
-}
-
-// Routing specifies the routing value.
-func (s *BulkService) Routing(routing string) *BulkService {
- s.routing = routing
- return s
-}
-
-// Pipeline specifies the pipeline id to preprocess incoming documents with.
-func (s *BulkService) Pipeline(pipeline string) *BulkService {
- s.pipeline = pipeline
- return s
-}
-
-// WaitForActiveShards sets the number of shard copies that must be active
-// before proceeding with the bulk operation. Defaults to 1, meaning the
-// primary shard only. Set to `all` for all shard copies, otherwise set to
-// any non-negative value less than or equal to the total number of copies
-// for the shard (number of replicas + 1).
-func (s *BulkService) WaitForActiveShards(waitForActiveShards string) *BulkService {
- s.waitForActiveShards = waitForActiveShards
- return s
-}
-
-// Pretty tells Elasticsearch whether to return a formatted JSON response.
-func (s *BulkService) Pretty(pretty bool) *BulkService {
- s.pretty = pretty
- return s
-}
-
-// Add adds bulkable requests, i.e. BulkIndexRequest, BulkUpdateRequest,
-// and/or BulkDeleteRequest.
-func (s *BulkService) Add(requests ...BulkableRequest) *BulkService {
- for _, r := range requests {
- s.requests = append(s.requests, r)
- }
- return s
-}
-
-// EstimatedSizeInBytes returns the estimated size of all bulkable
-// requests added via Add.
-func (s *BulkService) EstimatedSizeInBytes() int64 {
- if s.sizeInBytesCursor == len(s.requests) {
- return s.sizeInBytes
- }
- for _, r := range s.requests[s.sizeInBytesCursor:] {
- s.sizeInBytes += s.estimateSizeInBytes(r)
- s.sizeInBytesCursor++
- }
- return s.sizeInBytes
-}
-
-// estimateSizeInBytes returns the estimated size of the given
-// bulkable request, i.e. BulkIndexRequest, BulkUpdateRequest, and
-// BulkDeleteRequest.
-func (s *BulkService) estimateSizeInBytes(r BulkableRequest) int64 {
- lines, _ := r.Source()
- size := 0
- for _, line := range lines {
- // +1 for the \n
- size += len(line) + 1
- }
- return int64(size)
-}
-
-// NumberOfActions returns the number of bulkable requests that need to
-// be sent to Elasticsearch on the next batch.
-func (s *BulkService) NumberOfActions() int {
- return len(s.requests)
-}
-
-func (s *BulkService) bodyAsString() (string, error) {
- // Pre-allocate to reduce allocs
- buf := bytes.NewBuffer(make([]byte, 0, s.EstimatedSizeInBytes()))
-
- for _, req := range s.requests {
- source, err := req.Source()
- if err != nil {
- return "", err
- }
- for _, line := range source {
- buf.WriteString(line)
- buf.WriteByte('\n')
- }
- }
-
- return buf.String(), nil
-}
-
-// Do sends the batched requests to Elasticsearch. Note that, when successful,
-// you can reuse the BulkService for the next batch as the list of bulk
-// requests is cleared on success.
-func (s *BulkService) Do(ctx context.Context) (*BulkResponse, error) {
- // No actions?
- if s.NumberOfActions() == 0 {
- return nil, errors.New("elastic: No bulk actions to commit")
- }
-
- // Get body
- body, err := s.bodyAsString()
- if err != nil {
- return nil, err
- }
-
- // Build url
- path := "/"
- if len(s.index) > 0 {
- index, err := uritemplates.Expand("{index}", map[string]string{
- "index": s.index,
- })
- if err != nil {
- return nil, err
- }
- path += index + "/"
- }
- if len(s.typ) > 0 {
- typ, err := uritemplates.Expand("{type}", map[string]string{
- "type": s.typ,
- })
- if err != nil {
- return nil, err
- }
- path += typ + "/"
- }
- path += "_bulk"
-
- // Parameters
- params := make(url.Values)
- if s.pretty {
- params.Set("pretty", fmt.Sprintf("%v", s.pretty))
- }
- if s.pipeline != "" {
- params.Set("pipeline", s.pipeline)
- }
- if s.refresh != "" {
- params.Set("refresh", s.refresh)
- }
- if s.routing != "" {
- params.Set("routing", s.routing)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.waitForActiveShards != "" {
- params.Set("wait_for_active_shards", s.waitForActiveShards)
- }
-
- // Get response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- ContentType: "application/x-ndjson",
- Retrier: s.retrier,
- })
- if err != nil {
- return nil, err
- }
-
- // Return results
- ret := new(BulkResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
-
- // Reset so the request can be reused
- s.reset()
-
- return ret, nil
-}
-
-// BulkResponse is a response to a bulk execution.
-//
-// Example:
-// {
-// "took":3,
-// "errors":false,
-// "items":[{
-// "index":{
-// "_index":"index1",
-// "_type":"tweet",
-// "_id":"1",
-// "_version":3,
-// "status":201
-// }
-// },{
-// "index":{
-// "_index":"index2",
-// "_type":"tweet",
-// "_id":"2",
-// "_version":3,
-// "status":200
-// }
-// },{
-// "delete":{
-// "_index":"index1",
-// "_type":"tweet",
-// "_id":"1",
-// "_version":4,
-// "status":200,
-// "found":true
-// }
-// },{
-// "update":{
-// "_index":"index2",
-// "_type":"tweet",
-// "_id":"2",
-// "_version":4,
-// "status":200
-// }
-// }]
-// }
-type BulkResponse struct {
- Took int `json:"took,omitempty"`
- Errors bool `json:"errors,omitempty"`
- Items []map[string]*BulkResponseItem `json:"items,omitempty"`
-}
-
-// BulkResponseItem is the result of a single bulk request.
-type BulkResponseItem struct {
- Index string `json:"_index,omitempty"`
- Type string `json:"_type,omitempty"`
- Id string `json:"_id,omitempty"`
- Version int64 `json:"_version,omitempty"`
- Result string `json:"result,omitempty"`
- Shards *shardsInfo `json:"_shards,omitempty"`
- SeqNo int64 `json:"_seq_no,omitempty"`
- PrimaryTerm int64 `json:"_primary_term,omitempty"`
- Status int `json:"status,omitempty"`
- ForcedRefresh bool `json:"forced_refresh,omitempty"`
- Error *ErrorDetails `json:"error,omitempty"`
- GetResult *GetResult `json:"get,omitempty"`
-}
-
-// Indexed returns all bulk request results of "index" actions.
-func (r *BulkResponse) Indexed() []*BulkResponseItem {
- return r.ByAction("index")
-}
-
-// Created returns all bulk request results of "create" actions.
-func (r *BulkResponse) Created() []*BulkResponseItem {
- return r.ByAction("create")
-}
-
-// Updated returns all bulk request results of "update" actions.
-func (r *BulkResponse) Updated() []*BulkResponseItem {
- return r.ByAction("update")
-}
-
-// Deleted returns all bulk request results of "delete" actions.
-func (r *BulkResponse) Deleted() []*BulkResponseItem {
- return r.ByAction("delete")
-}
-
-// ByAction returns all bulk request results of a certain action,
-// e.g. "index" or "delete".
-func (r *BulkResponse) ByAction(action string) []*BulkResponseItem {
- if r.Items == nil {
- return nil
- }
- var items []*BulkResponseItem
- for _, item := range r.Items {
- if result, found := item[action]; found {
- items = append(items, result)
- }
- }
- return items
-}
-
-// ById returns all bulk request results of a given document id,
-// regardless of the action ("index", "delete" etc.).
-func (r *BulkResponse) ById(id string) []*BulkResponseItem {
- if r.Items == nil {
- return nil
- }
- var items []*BulkResponseItem
- for _, item := range r.Items {
- for _, result := range item {
- if result.Id == id {
- items = append(items, result)
- }
- }
- }
- return items
-}
-
-// Failed returns those items of a bulk response that have errors,
-// i.e. those that don't have a status code between 200 and 299.
-func (r *BulkResponse) Failed() []*BulkResponseItem {
- if r.Items == nil {
- return nil
- }
- var errors []*BulkResponseItem
- for _, item := range r.Items {
- for _, result := range item {
- if !(result.Status >= 200 && result.Status <= 299) {
- errors = append(errors, result)
- }
- }
- }
- return errors
-}
-
-// Succeeded returns those items of a bulk response that have no errors,
-// i.e. those that have a status code between 200 and 299.
-func (r *BulkResponse) Succeeded() []*BulkResponseItem {
- if r.Items == nil {
- return nil
- }
- var succeeded []*BulkResponseItem
- for _, item := range r.Items {
- for _, result := range item {
- if result.Status >= 200 && result.Status <= 299 {
- succeeded = append(succeeded, result)
- }
- }
- }
- return succeeded
-}
diff --git a/vendor/github.com/olivere/elastic/bulk_delete_request.go b/vendor/github.com/olivere/elastic/bulk_delete_request.go
deleted file mode 100644
index e6c98c553..000000000
--- a/vendor/github.com/olivere/elastic/bulk_delete_request.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-//go:generate easyjson bulk_delete_request.go
-
-import (
- "encoding/json"
- "fmt"
- "strings"
-)
-
-// -- Bulk delete request --
-
-// BulkDeleteRequest is a request to remove a document from Elasticsearch.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
-// for details.
-type BulkDeleteRequest struct {
- BulkableRequest
- index string
- typ string
- id string
- parent string
- routing string
- version int64 // default is MATCH_ANY
- versionType string // default is "internal"
-
- source []string
-
- useEasyJSON bool
-}
-
-//easyjson:json
-type bulkDeleteRequestCommand map[string]bulkDeleteRequestCommandOp
-
-//easyjson:json
-type bulkDeleteRequestCommandOp struct {
- Index string `json:"_index,omitempty"`
- Type string `json:"_type,omitempty"`
- Id string `json:"_id,omitempty"`
- Parent string `json:"parent,omitempty"`
- Routing string `json:"routing,omitempty"`
- Version int64 `json:"version,omitempty"`
- VersionType string `json:"version_type,omitempty"`
-}
-
-// NewBulkDeleteRequest returns a new BulkDeleteRequest.
-func NewBulkDeleteRequest() *BulkDeleteRequest {
- return &BulkDeleteRequest{}
-}
-
-// UseEasyJSON is an experimental setting that enables serialization
-// with github.com/mailru/easyjson, which should result in faster serialization
-// and fewer allocations, but removes compatibility with encoding/json,
-// makes use of unsafe, etc. See https://github.com/mailru/easyjson#issues-notes-and-limitations
-// for details. This setting is disabled by default.
-func (r *BulkDeleteRequest) UseEasyJSON(enable bool) *BulkDeleteRequest {
- r.useEasyJSON = enable
- return r
-}
-
-// Index specifies the Elasticsearch index to use for this delete request.
-// If unspecified, the index set on the BulkService will be used.
-func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
- r.index = index
- r.source = nil
- return r
-}
-
-// Type specifies the Elasticsearch type to use for this delete request.
-// If unspecified, the type set on the BulkService will be used.
-func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest {
- r.typ = typ
- r.source = nil
- return r
-}
-
-// Id specifies the identifier of the document to delete.
-func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest {
- r.id = id
- r.source = nil
- return r
-}
-
-// Parent specifies the parent of the request, which is used in parent/child
-// mappings.
-func (r *BulkDeleteRequest) Parent(parent string) *BulkDeleteRequest {
- r.parent = parent
- r.source = nil
- return r
-}
-
-// Routing specifies a routing value for the request.
-func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
- r.routing = routing
- r.source = nil
- return r
-}
-
-// Version indicates the version to be deleted as part of an optimistic
-// concurrency model.
-func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
- r.version = version
- r.source = nil
- return r
-}
-
-// VersionType can be "internal" (default), "external", "external_gte",
-// or "external_gt".
-func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
- r.versionType = versionType
- r.source = nil
- return r
-}
-
-// String returns the on-wire representation of the delete request,
-// concatenated as a single string.
-func (r *BulkDeleteRequest) String() string {
- lines, err := r.Source()
- if err != nil {
- return fmt.Sprintf("error: %v", err)
- }
- return strings.Join(lines, "\n")
-}
-
-// Source returns the on-wire representation of the delete request,
-// split into an action-and-meta-data line and an (optional) source line.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
-// for details.
-func (r *BulkDeleteRequest) Source() ([]string, error) {
- if r.source != nil {
- return r.source, nil
- }
- command := bulkDeleteRequestCommand{
- "delete": bulkDeleteRequestCommandOp{
- Index: r.index,
- Type: r.typ,
- Id: r.id,
- Routing: r.routing,
- Parent: r.parent,
- Version: r.version,
- VersionType: r.versionType,
- },
- }
-
- var err error
- var body []byte
- if r.useEasyJSON {
- // easyjson
- body, err = command.MarshalJSON()
- } else {
- // encoding/json
- body, err = json.Marshal(command)
- }
- if err != nil {
- return nil, err
- }
-
- lines := []string{string(body)}
- r.source = lines
-
- return lines, nil
-}
diff --git a/vendor/github.com/olivere/elastic/bulk_delete_request_easyjson.go b/vendor/github.com/olivere/elastic/bulk_delete_request_easyjson.go
deleted file mode 100644
index df3452ce6..000000000
--- a/vendor/github.com/olivere/elastic/bulk_delete_request_easyjson.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
-
-package elastic
-
-import (
- json "encoding/json"
- easyjson "github.com/mailru/easyjson"
- jlexer "github.com/mailru/easyjson/jlexer"
- jwriter "github.com/mailru/easyjson/jwriter"
-)
-
-// suppress unused package warning
-var (
- _ *json.RawMessage
- _ *jlexer.Lexer
- _ *jwriter.Writer
- _ easyjson.Marshaler
-)
-
-func easyjson8092efb6DecodeGithubComOlivereElastic(in *jlexer.Lexer, out *bulkDeleteRequestCommandOp) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeString()
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "_index":
- out.Index = string(in.String())
- case "_type":
- out.Type = string(in.String())
- case "_id":
- out.Id = string(in.String())
- case "parent":
- out.Parent = string(in.String())
- case "routing":
- out.Routing = string(in.String())
- case "version":
- out.Version = int64(in.Int64())
- case "version_type":
- out.VersionType = string(in.String())
- default:
- in.SkipRecursive()
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-func easyjson8092efb6EncodeGithubComOlivereElastic(out *jwriter.Writer, in bulkDeleteRequestCommandOp) {
- out.RawByte('{')
- first := true
- _ = first
- if in.Index != "" {
- const prefix string = ",\"_index\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Index))
- }
- if in.Type != "" {
- const prefix string = ",\"_type\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Type))
- }
- if in.Id != "" {
- const prefix string = ",\"_id\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Id))
- }
- if in.Parent != "" {
- const prefix string = ",\"parent\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Parent))
- }
- if in.Routing != "" {
- const prefix string = ",\"routing\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Routing))
- }
- if in.Version != 0 {
- const prefix string = ",\"version\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.Int64(int64(in.Version))
- }
- if in.VersionType != "" {
- const prefix string = ",\"version_type\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.VersionType))
- }
- out.RawByte('}')
-}
-
-// MarshalJSON supports json.Marshaler interface
-func (v bulkDeleteRequestCommandOp) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- easyjson8092efb6EncodeGithubComOlivereElastic(&w, v)
- return w.Buffer.BuildBytes(), w.Error
-}
-
-// MarshalEasyJSON supports easyjson.Marshaler interface
-func (v bulkDeleteRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) {
- easyjson8092efb6EncodeGithubComOlivereElastic(w, v)
-}
-
-// UnmarshalJSON supports json.Unmarshaler interface
-func (v *bulkDeleteRequestCommandOp) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- easyjson8092efb6DecodeGithubComOlivereElastic(&r, v)
- return r.Error()
-}
-
-// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
-func (v *bulkDeleteRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) {
- easyjson8092efb6DecodeGithubComOlivereElastic(l, v)
-}
-func easyjson8092efb6DecodeGithubComOlivereElastic1(in *jlexer.Lexer, out *bulkDeleteRequestCommand) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- in.Skip()
- } else {
- in.Delim('{')
- if !in.IsDelim('}') {
- *out = make(bulkDeleteRequestCommand)
- } else {
- *out = nil
- }
- for !in.IsDelim('}') {
- key := string(in.String())
- in.WantColon()
- var v1 bulkDeleteRequestCommandOp
- (v1).UnmarshalEasyJSON(in)
- (*out)[key] = v1
- in.WantComma()
- }
- in.Delim('}')
- }
- if isTopLevel {
- in.Consumed()
- }
-}
-func easyjson8092efb6EncodeGithubComOlivereElastic1(out *jwriter.Writer, in bulkDeleteRequestCommand) {
- if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 {
- out.RawString(`null`)
- } else {
- out.RawByte('{')
- v2First := true
- for v2Name, v2Value := range in {
- if v2First {
- v2First = false
- } else {
- out.RawByte(',')
- }
- out.String(string(v2Name))
- out.RawByte(':')
- (v2Value).MarshalEasyJSON(out)
- }
- out.RawByte('}')
- }
-}
-
-// MarshalJSON supports json.Marshaler interface
-func (v bulkDeleteRequestCommand) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- easyjson8092efb6EncodeGithubComOlivereElastic1(&w, v)
- return w.Buffer.BuildBytes(), w.Error
-}
-
-// MarshalEasyJSON supports easyjson.Marshaler interface
-func (v bulkDeleteRequestCommand) MarshalEasyJSON(w *jwriter.Writer) {
- easyjson8092efb6EncodeGithubComOlivereElastic1(w, v)
-}
-
-// UnmarshalJSON supports json.Unmarshaler interface
-func (v *bulkDeleteRequestCommand) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- easyjson8092efb6DecodeGithubComOlivereElastic1(&r, v)
- return r.Error()
-}
-
-// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
-func (v *bulkDeleteRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) {
- easyjson8092efb6DecodeGithubComOlivereElastic1(l, v)
-}
diff --git a/vendor/github.com/olivere/elastic/bulk_delete_request_test.go b/vendor/github.com/olivere/elastic/bulk_delete_request_test.go
deleted file mode 100644
index 8635e34d1..000000000
--- a/vendor/github.com/olivere/elastic/bulk_delete_request_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "testing"
-)
-
-func TestBulkDeleteRequestSerialization(t *testing.T) {
- tests := []struct {
- Request BulkableRequest
- Expected []string
- }{
- // #0
- {
- Request: NewBulkDeleteRequest().Index("index1").Type("doc").Id("1"),
- Expected: []string{
- `{"delete":{"_index":"index1","_type":"doc","_id":"1"}}`,
- },
- },
- // #1
- {
- Request: NewBulkDeleteRequest().Index("index1").Type("doc").Id("1").Parent("2"),
- Expected: []string{
- `{"delete":{"_index":"index1","_type":"doc","_id":"1","parent":"2"}}`,
- },
- },
- // #2
- {
- Request: NewBulkDeleteRequest().Index("index1").Type("doc").Id("1").Routing("3"),
- Expected: []string{
- `{"delete":{"_index":"index1","_type":"doc","_id":"1","routing":"3"}}`,
- },
- },
- }
-
- for i, test := range tests {
- lines, err := test.Request.Source()
- if err != nil {
- t.Fatalf("case #%d: expected no error, got: %v", i, err)
- }
- if lines == nil {
- t.Fatalf("case #%d: expected lines, got nil", i)
- }
- if len(lines) != len(test.Expected) {
- t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
- }
- for j, line := range lines {
- if line != test.Expected[j] {
- t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
- }
- }
- }
-}
-
-var bulkDeleteRequestSerializationResult string
-
-func BenchmarkBulkDeleteRequestSerialization(b *testing.B) {
- b.Run("stdlib", func(b *testing.B) {
- r := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
- benchmarkBulkDeleteRequestSerialization(b, r.UseEasyJSON(false))
- })
- b.Run("easyjson", func(b *testing.B) {
- r := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
- benchmarkBulkDeleteRequestSerialization(b, r.UseEasyJSON(true))
- })
-}
-
-func benchmarkBulkDeleteRequestSerialization(b *testing.B, r *BulkDeleteRequest) {
- var s string
- for n := 0; n < b.N; n++ {
- s = r.String()
- r.source = nil // Don't let caching spoil the benchmark
- }
- bulkDeleteRequestSerializationResult = s // ensure the compiler doesn't optimize
- b.ReportAllocs()
-}
diff --git a/vendor/github.com/olivere/elastic/bulk_index_request.go b/vendor/github.com/olivere/elastic/bulk_index_request.go
deleted file mode 100644
index 321d2e25a..000000000
--- a/vendor/github.com/olivere/elastic/bulk_index_request.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-//go:generate easyjson bulk_index_request.go
-
-import (
- "encoding/json"
- "fmt"
- "strings"
-)
-
-// BulkIndexRequest is a request to add a document to Elasticsearch.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
-// for details.
-type BulkIndexRequest struct {
- BulkableRequest
- index string
- typ string
- id string
- opType string
- routing string
- parent string
- version int64 // default is MATCH_ANY
- versionType string // default is "internal"
- doc interface{}
- pipeline string
- retryOnConflict *int
-
- source []string
-
- useEasyJSON bool
-}
-
-//easyjson:json
-type bulkIndexRequestCommand map[string]bulkIndexRequestCommandOp
-
-//easyjson:json
-type bulkIndexRequestCommandOp struct {
- Index string `json:"_index,omitempty"`
- Id string `json:"_id,omitempty"`
- Type string `json:"_type,omitempty"`
- Parent string `json:"parent,omitempty"`
- // RetryOnConflict is "_retry_on_conflict" for 6.0 and "retry_on_conflict" for 6.1+.
- RetryOnConflict *int `json:"retry_on_conflict,omitempty"`
- Routing string `json:"routing,omitempty"`
- Version int64 `json:"version,omitempty"`
- VersionType string `json:"version_type,omitempty"`
- Pipeline string `json:"pipeline,omitempty"`
-}
-
-// NewBulkIndexRequest returns a new BulkIndexRequest.
-// The operation type is "index" by default.
-func NewBulkIndexRequest() *BulkIndexRequest {
- return &BulkIndexRequest{
- opType: "index",
- }
-}
-
-// UseEasyJSON is an experimental setting that enables serialization
-// with github.com/mailru/easyjson, which should result in faster serialization
-// and fewer allocations, but gives up compatibility with encoding/json and
-// relies on unsafe, etc. See https://github.com/mailru/easyjson#issues-notes-and-limitations
-// for details. This setting is disabled by default.
-func (r *BulkIndexRequest) UseEasyJSON(enable bool) *BulkIndexRequest {
- r.useEasyJSON = enable
- return r
-}
-
-// Index specifies the Elasticsearch index to use for this index request.
-// If unspecified, the index set on the BulkService will be used.
-func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest {
- r.index = index
- r.source = nil
- return r
-}
-
-// Type specifies the Elasticsearch type to use for this index request.
-// If unspecified, the type set on the BulkService will be used.
-func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest {
- r.typ = typ
- r.source = nil
- return r
-}
-
-// Id specifies the identifier of the document to index.
-func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {
- r.id = id
- r.source = nil
- return r
-}
-
-// OpType specifies if this request should follow create-only or upsert
-// behavior. This follows the OpType of the standard document index API.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html#operation-type
-// for details.
-func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
- r.opType = opType
- r.source = nil
- return r
-}
-
-// Routing specifies a routing value for the request.
-func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest {
- r.routing = routing
- r.source = nil
- return r
-}
-
-// Parent specifies the identifier of the parent document (if available).
-func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest {
- r.parent = parent
- r.source = nil
- return r
-}
-
-// Version indicates the version of the document as part of an optimistic
-// concurrency model.
-func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
- r.version = version
- r.source = nil
- return r
-}
-
-// VersionType specifies how versions are created. It can be e.g. internal,
-// external, external_gte, or force.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html#index-versioning
-// for details.
-func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
- r.versionType = versionType
- r.source = nil
- return r
-}
-
-// Doc specifies the document to index.
-func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest {
- r.doc = doc
- r.source = nil
- return r
-}
-
-// RetryOnConflict specifies how often to retry in case of a version conflict.
-func (r *BulkIndexRequest) RetryOnConflict(retryOnConflict int) *BulkIndexRequest {
- r.retryOnConflict = &retryOnConflict
- r.source = nil
- return r
-}
-
-// Pipeline to use while processing the request.
-func (r *BulkIndexRequest) Pipeline(pipeline string) *BulkIndexRequest {
- r.pipeline = pipeline
- r.source = nil
- return r
-}
-
-// String returns the on-wire representation of the index request,
-// concatenated as a single string.
-func (r *BulkIndexRequest) String() string {
- lines, err := r.Source()
- if err != nil {
- return fmt.Sprintf("error: %v", err)
- }
- return strings.Join(lines, "\n")
-}
-
-// Source returns the on-wire representation of the index request,
-// split into an action-and-meta-data line and an (optional) source line.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
-// for details.
-func (r *BulkIndexRequest) Source() ([]string, error) {
- // { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
- // { "field1" : "value1" }
-
- if r.source != nil {
- return r.source, nil
- }
-
- lines := make([]string, 2)
-
- // "index" ...
- indexCommand := bulkIndexRequestCommandOp{
- Index: r.index,
- Type: r.typ,
- Id: r.id,
- Routing: r.routing,
- Parent: r.parent,
- Version: r.version,
- VersionType: r.versionType,
- RetryOnConflict: r.retryOnConflict,
- Pipeline: r.pipeline,
- }
- command := bulkIndexRequestCommand{
- r.opType: indexCommand,
- }
-
- var err error
- var body []byte
- if r.useEasyJSON {
- // easyjson
- body, err = command.MarshalJSON()
- } else {
- // encoding/json
- body, err = json.Marshal(command)
- }
- if err != nil {
- return nil, err
- }
-
- lines[0] = string(body)
-
- // "field1" ...
- if r.doc != nil {
- switch t := r.doc.(type) {
- default:
- body, err := json.Marshal(r.doc)
- if err != nil {
- return nil, err
- }
- lines[1] = string(body)
- case json.RawMessage:
- lines[1] = string(t)
- case *json.RawMessage:
- lines[1] = string(*t)
- case string:
- lines[1] = t
- case *string:
- lines[1] = *t
- }
- } else {
- lines[1] = "{}"
- }
-
- r.source = lines
- return lines, nil
-}
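For context on the API removed above, a minimal sketch of how a BulkIndexRequest is typically driven: Source() yields the two NDJSON lines (the action-and-meta-data line and the document line) that the bulk service sends to the _bulk endpoint. The index name, type, id, and document below are illustrative placeholders.

package main

import (
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	// Build an index action; Doc accepts any JSON-serializable value.
	req := elastic.NewBulkIndexRequest().
		Index("index1").
		Type("doc").
		Id("1").
		Doc(map[string]interface{}{"user": "olivere"})

	lines, err := req.Source()
	if err != nil {
		panic(err)
	}
	// Prints the "index" command line followed by the document line.
	for _, line := range lines {
		fmt.Println(line)
	}
}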
diff --git a/vendor/github.com/olivere/elastic/bulk_index_request_easyjson.go b/vendor/github.com/olivere/elastic/bulk_index_request_easyjson.go
deleted file mode 100644
index f8792978f..000000000
--- a/vendor/github.com/olivere/elastic/bulk_index_request_easyjson.go
+++ /dev/null
@@ -1,262 +0,0 @@
-// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
-
-package elastic
-
-import (
- json "encoding/json"
- easyjson "github.com/mailru/easyjson"
- jlexer "github.com/mailru/easyjson/jlexer"
- jwriter "github.com/mailru/easyjson/jwriter"
-)
-
-// suppress unused package warning
-var (
- _ *json.RawMessage
- _ *jlexer.Lexer
- _ *jwriter.Writer
- _ easyjson.Marshaler
-)
-
-func easyjson9de0fcbfDecodeGithubComOlivereElastic(in *jlexer.Lexer, out *bulkIndexRequestCommandOp) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeString()
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "_index":
- out.Index = string(in.String())
- case "_id":
- out.Id = string(in.String())
- case "_type":
- out.Type = string(in.String())
- case "parent":
- out.Parent = string(in.String())
- case "retry_on_conflict":
- if in.IsNull() {
- in.Skip()
- out.RetryOnConflict = nil
- } else {
- if out.RetryOnConflict == nil {
- out.RetryOnConflict = new(int)
- }
- *out.RetryOnConflict = int(in.Int())
- }
- case "routing":
- out.Routing = string(in.String())
- case "version":
- out.Version = int64(in.Int64())
- case "version_type":
- out.VersionType = string(in.String())
- case "pipeline":
- out.Pipeline = string(in.String())
- default:
- in.SkipRecursive()
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-func easyjson9de0fcbfEncodeGithubComOlivereElastic(out *jwriter.Writer, in bulkIndexRequestCommandOp) {
- out.RawByte('{')
- first := true
- _ = first
- if in.Index != "" {
- const prefix string = ",\"_index\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Index))
- }
- if in.Id != "" {
- const prefix string = ",\"_id\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Id))
- }
- if in.Type != "" {
- const prefix string = ",\"_type\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Type))
- }
- if in.Parent != "" {
- const prefix string = ",\"parent\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Parent))
- }
- if in.RetryOnConflict != nil {
- const prefix string = ",\"retry_on_conflict\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.Int(int(*in.RetryOnConflict))
- }
- if in.Routing != "" {
- const prefix string = ",\"routing\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Routing))
- }
- if in.Version != 0 {
- const prefix string = ",\"version\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.Int64(int64(in.Version))
- }
- if in.VersionType != "" {
- const prefix string = ",\"version_type\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.VersionType))
- }
- if in.Pipeline != "" {
- const prefix string = ",\"pipeline\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Pipeline))
- }
- out.RawByte('}')
-}
-
-// MarshalJSON supports json.Marshaler interface
-func (v bulkIndexRequestCommandOp) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- easyjson9de0fcbfEncodeGithubComOlivereElastic(&w, v)
- return w.Buffer.BuildBytes(), w.Error
-}
-
-// MarshalEasyJSON supports easyjson.Marshaler interface
-func (v bulkIndexRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) {
- easyjson9de0fcbfEncodeGithubComOlivereElastic(w, v)
-}
-
-// UnmarshalJSON supports json.Unmarshaler interface
-func (v *bulkIndexRequestCommandOp) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- easyjson9de0fcbfDecodeGithubComOlivereElastic(&r, v)
- return r.Error()
-}
-
-// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
-func (v *bulkIndexRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) {
- easyjson9de0fcbfDecodeGithubComOlivereElastic(l, v)
-}
-func easyjson9de0fcbfDecodeGithubComOlivereElastic1(in *jlexer.Lexer, out *bulkIndexRequestCommand) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- in.Skip()
- } else {
- in.Delim('{')
- if !in.IsDelim('}') {
- *out = make(bulkIndexRequestCommand)
- } else {
- *out = nil
- }
- for !in.IsDelim('}') {
- key := string(in.String())
- in.WantColon()
- var v1 bulkIndexRequestCommandOp
- (v1).UnmarshalEasyJSON(in)
- (*out)[key] = v1
- in.WantComma()
- }
- in.Delim('}')
- }
- if isTopLevel {
- in.Consumed()
- }
-}
-func easyjson9de0fcbfEncodeGithubComOlivereElastic1(out *jwriter.Writer, in bulkIndexRequestCommand) {
- if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 {
- out.RawString(`null`)
- } else {
- out.RawByte('{')
- v2First := true
- for v2Name, v2Value := range in {
- if v2First {
- v2First = false
- } else {
- out.RawByte(',')
- }
- out.String(string(v2Name))
- out.RawByte(':')
- (v2Value).MarshalEasyJSON(out)
- }
- out.RawByte('}')
- }
-}
-
-// MarshalJSON supports json.Marshaler interface
-func (v bulkIndexRequestCommand) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- easyjson9de0fcbfEncodeGithubComOlivereElastic1(&w, v)
- return w.Buffer.BuildBytes(), w.Error
-}
-
-// MarshalEasyJSON supports easyjson.Marshaler interface
-func (v bulkIndexRequestCommand) MarshalEasyJSON(w *jwriter.Writer) {
- easyjson9de0fcbfEncodeGithubComOlivereElastic1(w, v)
-}
-
-// UnmarshalJSON supports json.Unmarshaler interface
-func (v *bulkIndexRequestCommand) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- easyjson9de0fcbfDecodeGithubComOlivereElastic1(&r, v)
- return r.Error()
-}
-
-// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
-func (v *bulkIndexRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) {
- easyjson9de0fcbfDecodeGithubComOlivereElastic1(l, v)
-}
diff --git a/vendor/github.com/olivere/elastic/bulk_index_request_test.go b/vendor/github.com/olivere/elastic/bulk_index_request_test.go
deleted file mode 100644
index 79baf51fb..000000000
--- a/vendor/github.com/olivere/elastic/bulk_index_request_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "testing"
- "time"
-)
-
-func TestBulkIndexRequestSerialization(t *testing.T) {
- tests := []struct {
- Request BulkableRequest
- Expected []string
- }{
- // #0
- {
- Request: NewBulkIndexRequest().Index("index1").Type("doc").Id("1").
- Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
- Expected: []string{
- `{"index":{"_index":"index1","_id":"1","_type":"doc"}}`,
- `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
- },
- },
- // #1
- {
- Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("doc").Id("1").
- Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
- Expected: []string{
- `{"create":{"_index":"index1","_id":"1","_type":"doc"}}`,
- `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
- },
- },
- // #2
- {
- Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").
- Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
- Expected: []string{
- `{"index":{"_index":"index1","_id":"1","_type":"doc"}}`,
- `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
- },
- },
- // #3
- {
- Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").RetryOnConflict(42).
- Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
- Expected: []string{
- `{"index":{"_index":"index1","_id":"1","_type":"doc","retry_on_conflict":42}}`,
- `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
- },
- },
- // #4
- {
- Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").Pipeline("my_pipeline").
- Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
- Expected: []string{
- `{"index":{"_index":"index1","_id":"1","_type":"doc","pipeline":"my_pipeline"}}`,
- `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
- },
- },
- // #5
- {
- Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").
- Routing("123").
- Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
- Expected: []string{
- `{"index":{"_index":"index1","_id":"1","_type":"doc","routing":"123"}}`,
- `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
- },
- },
- }
-
- for i, test := range tests {
- lines, err := test.Request.Source()
- if err != nil {
- t.Fatalf("case #%d: expected no error, got: %v", i, err)
- }
- if lines == nil {
- t.Fatalf("case #%d: expected lines, got nil", i)
- }
- if len(lines) != len(test.Expected) {
- t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
- }
- for j, line := range lines {
- if line != test.Expected[j] {
- t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
- }
- }
- }
-}
-
-var bulkIndexRequestSerializationResult string
-
-func BenchmarkBulkIndexRequestSerialization(b *testing.B) {
- b.Run("stdlib", func(b *testing.B) {
- r := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").
- Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)})
- benchmarkBulkIndexRequestSerialization(b, r.UseEasyJSON(false))
- })
- b.Run("easyjson", func(b *testing.B) {
- r := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").
- Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)})
- benchmarkBulkIndexRequestSerialization(b, r.UseEasyJSON(true))
- })
-}
-
-func benchmarkBulkIndexRequestSerialization(b *testing.B, r *BulkIndexRequest) {
- var s string
- for n := 0; n < b.N; n++ {
- s = r.String()
- r.source = nil // Don't let caching spoil the benchmark
- }
- bulkIndexRequestSerializationResult = s // ensure the compiler doesn't optimize
- b.ReportAllocs()
-}
diff --git a/vendor/github.com/olivere/elastic/bulk_processor.go b/vendor/github.com/olivere/elastic/bulk_processor.go
deleted file mode 100644
index 6ee8a3dee..000000000
--- a/vendor/github.com/olivere/elastic/bulk_processor.go
+++ /dev/null
@@ -1,600 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "net"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// BulkProcessorService makes it easy to process bulk requests. It lets you set
-// policies for when to flush new bulk requests, e.g. based on the number of
-// actions, the size of the actions, and/or a periodic interval. It also lets
-// you control the number of concurrent bulk requests allowed to be executed
-// in parallel.
-//
-// BulkProcessorService, by default, commits either every 1000 requests or when the
-// (estimated) size of the bulk requests exceeds 5 MB. However, it does not
-// commit periodically. By default, BulkProcessorService also retries failed
-// commits, using an exponential backoff algorithm.
-//
-// The caller is responsible for setting the index and type on every
-// bulk request added to BulkProcessorService.
-//
-// BulkProcessorService takes ideas from the BulkProcessor of the
-// Elasticsearch Java API as documented in
-// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html.
-type BulkProcessorService struct {
- c *Client
- beforeFn BulkBeforeFunc
- afterFn BulkAfterFunc
- name string // name of processor
- numWorkers int // # of workers (>= 1)
- bulkActions int // # of requests after which to commit
- bulkSize int // # of bytes after which to commit
- flushInterval time.Duration // periodic flush interval
- wantStats bool // indicates whether to gather statistics
- backoff Backoff // a custom Backoff to use for errors
-}
-
-// NewBulkProcessorService creates a new BulkProcessorService.
-func NewBulkProcessorService(client *Client) *BulkProcessorService {
- return &BulkProcessorService{
- c: client,
- numWorkers: 1,
- bulkActions: 1000,
- bulkSize: 5 << 20, // 5 MB
- backoff: NewExponentialBackoff(
- time.Duration(200)*time.Millisecond,
- time.Duration(10000)*time.Millisecond,
- ),
- }
-}
-
-// BulkBeforeFunc defines the signature of callbacks that are executed
-// before a commit to Elasticsearch.
-type BulkBeforeFunc func(executionId int64, requests []BulkableRequest)
-
-// BulkAfterFunc defines the signature of callbacks that are executed
-// after a commit to Elasticsearch. The err parameter signals an error.
-type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error)
-
-// Before specifies a function to be executed before bulk requests get committed
-// to Elasticsearch.
-func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService {
- s.beforeFn = fn
- return s
-}
-
-// After specifies a function to be executed when bulk requests have been
-// committed to Elasticsearch. The After callback runs after both successful
-// and failed commits.
-func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService {
- s.afterFn = fn
- return s
-}
-
-// Name is an optional name to identify this bulk processor.
-func (s *BulkProcessorService) Name(name string) *BulkProcessorService {
- s.name = name
- return s
-}
-
-// Workers sets the number of concurrent workers. Defaults to 1 and must be
-// greater than or equal to 1.
-func (s *BulkProcessorService) Workers(num int) *BulkProcessorService {
- s.numWorkers = num
- return s
-}
-
-// BulkActions specifies when to flush based on the number of actions
-// currently added. Defaults to 1000 and can be set to -1 to be disabled.
-func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService {
- s.bulkActions = bulkActions
- return s
-}
-
-// BulkSize specifies when to flush based on the size (in bytes) of the actions
-// currently added. Defaults to 5 MB and can be set to -1 to be disabled.
-func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService {
- s.bulkSize = bulkSize
- return s
-}
-
-// FlushInterval specifies when to flush at the end of the given interval.
-// This is disabled by default. If you want the bulk processor to
-// operate completely asynchronously, set both BulkActions and BulkSize to
-// -1 and set the FlushInterval to a meaningful interval.
-func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService {
- s.flushInterval = interval
- return s
-}
-
-// Stats tells the bulk processor to gather stats while running.
-// Use BulkProcessor.Stats to retrieve them. This is disabled by default.
-func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService {
- s.wantStats = wantStats
- return s
-}
-
-// Backoff sets the backoff strategy to use for errors.
-func (s *BulkProcessorService) Backoff(backoff Backoff) *BulkProcessorService {
- s.backoff = backoff
- return s
-}
-
-// Do creates a new BulkProcessor and starts it.
-// Consider the BulkProcessor as a running instance that accepts bulk requests
-// and commits them to Elasticsearch, spreading the work across one or more
-// workers.
-//
-// You can interact with the BulkProcessor returned by Do, e.g. Start and
-// Stop (or Close) it.
-//
-// Context is an optional context that is passed into the bulk request
-// service calls. In contrast to other operations, this context is used in
-// a long running process. You could use it to pass e.g. loggers, but you
-// shouldn't use it for cancellation.
-//
-// Calling Do several times returns new BulkProcessors. You probably don't
-// want to do this. BulkProcessorService implements just a builder pattern.
-func (s *BulkProcessorService) Do(ctx context.Context) (*BulkProcessor, error) {
- p := newBulkProcessor(
- s.c,
- s.beforeFn,
- s.afterFn,
- s.name,
- s.numWorkers,
- s.bulkActions,
- s.bulkSize,
- s.flushInterval,
- s.wantStats,
- s.backoff)
-
- err := p.Start(ctx)
- if err != nil {
- return nil, err
- }
- return p, nil
-}
-
-// -- Bulk Processor Statistics --
-
-// BulkProcessorStats contains various statistics of a bulk processor
-// while it is running. Use the Stats func to return it while running.
-type BulkProcessorStats struct {
- Flushed int64 // number of times the flush interval has been invoked
- Committed int64 // # of times workers committed bulk requests
- Indexed int64 // # of requests indexed
- Created int64 // # of requests that ES reported as creates (201)
- Updated int64 // # of requests that ES reported as updates
- Deleted int64 // # of requests that ES reported as deletes
- Succeeded int64 // # of requests that ES reported as successful
- Failed int64 // # of requests that ES reported as failed
-
- Workers []*BulkProcessorWorkerStats // stats for each worker
-}
-
-// BulkProcessorWorkerStats represents per-worker statistics.
-type BulkProcessorWorkerStats struct {
- Queued int64 // # of requests queued in this worker
- LastDuration time.Duration // duration of last commit
-}
-
-// newBulkProcessorStats initializes and returns a BulkProcessorStats struct.
-func newBulkProcessorStats(workers int) *BulkProcessorStats {
- stats := &BulkProcessorStats{
- Workers: make([]*BulkProcessorWorkerStats, workers),
- }
- for i := 0; i < workers; i++ {
- stats.Workers[i] = &BulkProcessorWorkerStats{}
- }
- return stats
-}
-
-func (st *BulkProcessorStats) dup() *BulkProcessorStats {
- dst := new(BulkProcessorStats)
- dst.Flushed = st.Flushed
- dst.Committed = st.Committed
- dst.Indexed = st.Indexed
- dst.Created = st.Created
- dst.Updated = st.Updated
- dst.Deleted = st.Deleted
- dst.Succeeded = st.Succeeded
- dst.Failed = st.Failed
- for _, src := range st.Workers {
- dst.Workers = append(dst.Workers, src.dup())
- }
- return dst
-}
-
-func (st *BulkProcessorWorkerStats) dup() *BulkProcessorWorkerStats {
- dst := new(BulkProcessorWorkerStats)
- dst.Queued = st.Queued
- dst.LastDuration = st.LastDuration
- return dst
-}
-
-// -- Bulk Processor --
-
-// BulkProcessor encapsulates a task that accepts bulk requests and
-// orchestrates committing them to Elasticsearch via one or more workers.
-//
-// BulkProcessor is returned by setting up a BulkProcessorService and
-// calling the Do method.
-type BulkProcessor struct {
- c *Client
- beforeFn BulkBeforeFunc
- afterFn BulkAfterFunc
- name string
- bulkActions int
- bulkSize int
- numWorkers int
- executionId int64
- requestsC chan BulkableRequest
- workerWg sync.WaitGroup
- workers []*bulkWorker
- flushInterval time.Duration
- flusherStopC chan struct{}
- wantStats bool
- backoff Backoff
-
- startedMu sync.Mutex // guards the following block
- started bool
-
- statsMu sync.Mutex // guards the following block
- stats *BulkProcessorStats
-
- stopReconnC chan struct{} // channel to signal stop reconnection attempts
-}
-
-func newBulkProcessor(
- client *Client,
- beforeFn BulkBeforeFunc,
- afterFn BulkAfterFunc,
- name string,
- numWorkers int,
- bulkActions int,
- bulkSize int,
- flushInterval time.Duration,
- wantStats bool,
- backoff Backoff) *BulkProcessor {
- return &BulkProcessor{
- c: client,
- beforeFn: beforeFn,
- afterFn: afterFn,
- name: name,
- numWorkers: numWorkers,
- bulkActions: bulkActions,
- bulkSize: bulkSize,
- flushInterval: flushInterval,
- wantStats: wantStats,
- backoff: backoff,
- }
-}
-
-// Start starts the bulk processor. If the processor is already started,
-// nil is returned.
-func (p *BulkProcessor) Start(ctx context.Context) error {
- p.startedMu.Lock()
- defer p.startedMu.Unlock()
-
- if p.started {
- return nil
- }
-
- // We must have at least one worker.
- if p.numWorkers < 1 {
- p.numWorkers = 1
- }
-
- p.requestsC = make(chan BulkableRequest)
- p.executionId = 0
- p.stats = newBulkProcessorStats(p.numWorkers)
- p.stopReconnC = make(chan struct{})
-
- // Create and start up workers.
- p.workers = make([]*bulkWorker, p.numWorkers)
- for i := 0; i < p.numWorkers; i++ {
- p.workerWg.Add(1)
- p.workers[i] = newBulkWorker(p, i)
- go p.workers[i].work(ctx)
- }
-
- // Start the ticker for flush (if enabled)
- if int64(p.flushInterval) > 0 {
- p.flusherStopC = make(chan struct{})
- go p.flusher(p.flushInterval)
- }
-
- p.started = true
-
- return nil
-}
-
-// Stop is an alias for Close.
-func (p *BulkProcessor) Stop() error {
- return p.Close()
-}
-
-// Close stops the bulk processor previously started with Do.
-// If it is already stopped, this is a no-op and nil is returned.
-//
-// By implementing Close, BulkProcessor implements the io.Closer interface.
-func (p *BulkProcessor) Close() error {
- p.startedMu.Lock()
- defer p.startedMu.Unlock()
-
- // Already stopped? Do nothing.
- if !p.started {
- return nil
- }
-
- // Tell connection checkers to stop
- if p.stopReconnC != nil {
- close(p.stopReconnC)
- p.stopReconnC = nil
- }
-
- // Stop flusher (if enabled)
- if p.flusherStopC != nil {
- p.flusherStopC <- struct{}{}
- <-p.flusherStopC
- close(p.flusherStopC)
- p.flusherStopC = nil
- }
-
- // Stop all workers.
- close(p.requestsC)
- p.workerWg.Wait()
-
- p.started = false
-
- return nil
-}
-
-// Stats returns the latest bulk processor statistics.
-// Collecting stats must be enabled first by calling Stats(true) on
-// the service that created this processor.
-func (p *BulkProcessor) Stats() BulkProcessorStats {
- p.statsMu.Lock()
- defer p.statsMu.Unlock()
- return *p.stats.dup()
-}
-
-// Add adds a single request to be committed by the BulkProcessorService.
-//
-// The caller is responsible for setting the index and type on the request.
-func (p *BulkProcessor) Add(request BulkableRequest) {
- p.requestsC <- request
-}
-
-// Flush manually asks all workers to commit their outstanding requests.
-// It returns only when all workers acknowledge completion.
-func (p *BulkProcessor) Flush() error {
- p.statsMu.Lock()
- p.stats.Flushed++
- p.statsMu.Unlock()
-
- for _, w := range p.workers {
- w.flushC <- struct{}{}
- <-w.flushAckC // wait for completion
- }
- return nil
-}
-
-// flusher is a single goroutine that periodically asks all workers to
-// commit their outstanding bulk requests. It is only started if
-// FlushInterval is greater than 0.
-func (p *BulkProcessor) flusher(interval time.Duration) {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C: // Periodic flush
- p.Flush() // TODO swallow errors here?
-
- case <-p.flusherStopC:
- p.flusherStopC <- struct{}{}
- return
- }
- }
-}
-
-// -- Bulk Worker --
-
-// bulkWorker encapsulates a single worker, running in a goroutine,
-// receiving bulk requests and eventually committing them to Elasticsearch.
-// It is strongly bound to a BulkProcessor.
-type bulkWorker struct {
- p *BulkProcessor
- i int
- bulkActions int
- bulkSize int
- service *BulkService
- flushC chan struct{}
- flushAckC chan struct{}
-}
-
-// newBulkWorker creates a new bulkWorker instance.
-func newBulkWorker(p *BulkProcessor, i int) *bulkWorker {
- return &bulkWorker{
- p: p,
- i: i,
- bulkActions: p.bulkActions,
- bulkSize: p.bulkSize,
- service: NewBulkService(p.c),
- flushC: make(chan struct{}),
- flushAckC: make(chan struct{}),
- }
-}
-
-// work waits for bulk requests and manual flush calls on the respective
-// channels and is invoked as a goroutine when the bulk processor is started.
-func (w *bulkWorker) work(ctx context.Context) {
- defer func() {
- w.p.workerWg.Done()
- close(w.flushAckC)
- close(w.flushC)
- }()
-
- var stop bool
- for !stop {
- var err error
- select {
- case req, open := <-w.p.requestsC:
- if open {
- // Received a new request
- w.service.Add(req)
- if w.commitRequired() {
- err = w.commit(ctx)
- }
- } else {
- // Channel closed: Stop.
- stop = true
- if w.service.NumberOfActions() > 0 {
- err = w.commit(ctx)
- }
- }
-
- case <-w.flushC:
- // Commit outstanding requests
- if w.service.NumberOfActions() > 0 {
- err = w.commit(ctx)
- }
- w.flushAckC <- struct{}{}
- }
- if !stop && err != nil {
- waitForActive := func() {
- // Add back pressure to prevent Add calls from filling up the request queue
- ready := make(chan struct{})
- go w.waitForActiveConnection(ready)
- <-ready
- }
- if _, ok := err.(net.Error); ok {
- waitForActive()
- } else if IsConnErr(err) {
- waitForActive()
- }
- }
- }
-}
-
-// commit commits the bulk requests in the given service,
-// invoking callbacks as specified.
-func (w *bulkWorker) commit(ctx context.Context) error {
- var res *BulkResponse
-
- // commitFunc will commit bulk requests and, on failure, be retried
- // via exponential backoff
- commitFunc := func() error {
- var err error
- res, err = w.service.Do(ctx)
- return err
- }
- // notifyFunc will be called if retry fails
- notifyFunc := func(err error) {
- w.p.c.errorf("elastic: bulk processor %q failed but may retry: %v", w.p.name, err)
- }
-
- id := atomic.AddInt64(&w.p.executionId, 1)
-
- // Update # documents in queue before eventual retries
- w.p.statsMu.Lock()
- if w.p.wantStats {
- w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests))
- }
- w.p.statsMu.Unlock()
-
- // Save requests because they will be reset in commitFunc
- reqs := w.service.requests
-
- // Invoke before callback
- if w.p.beforeFn != nil {
- w.p.beforeFn(id, reqs)
- }
-
- // Commit bulk requests
- err := RetryNotify(commitFunc, w.p.backoff, notifyFunc)
- w.updateStats(res)
- if err != nil {
- w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err)
- }
-
- // Invoke after callback
- if w.p.afterFn != nil {
- w.p.afterFn(id, reqs, res, err)
- }
-
- return err
-}
-
-func (w *bulkWorker) waitForActiveConnection(ready chan<- struct{}) {
- defer close(ready)
-
- t := time.NewTicker(5 * time.Second)
- defer t.Stop()
-
- client := w.p.c
- stopReconnC := w.p.stopReconnC
- w.p.c.errorf("elastic: bulk processor %q is waiting for an active connection", w.p.name)
-
- // loop until a health check finds at least 1 active connection or the reconnection channel is closed
- for {
- select {
- case _, ok := <-stopReconnC:
- if !ok {
- w.p.c.errorf("elastic: bulk processor %q active connection check interrupted", w.p.name)
- return
- }
- case <-t.C:
- client.healthcheck(time.Duration(3)*time.Second, true)
- if client.mustActiveConn() == nil {
- // found an active connection
- // exit and signal done to the WaitGroup
- return
- }
- }
- }
-}
-
-func (w *bulkWorker) updateStats(res *BulkResponse) {
- // Update stats
- if res != nil {
- w.p.statsMu.Lock()
- if w.p.wantStats {
- w.p.stats.Committed++
- if res != nil {
- w.p.stats.Indexed += int64(len(res.Indexed()))
- w.p.stats.Created += int64(len(res.Created()))
- w.p.stats.Updated += int64(len(res.Updated()))
- w.p.stats.Deleted += int64(len(res.Deleted()))
- w.p.stats.Succeeded += int64(len(res.Succeeded()))
- w.p.stats.Failed += int64(len(res.Failed()))
- }
- w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests))
- w.p.stats.Workers[w.i].LastDuration = time.Duration(int64(res.Took)) * time.Millisecond
- }
- w.p.statsMu.Unlock()
- }
-}
-
-// commitRequired returns true if the service has to commit its
-// bulk requests. This can be either because the number of actions
-// or the estimated size in bytes is larger than specified in the
-// BulkProcessorService.
-func (w *bulkWorker) commitRequired() bool {
- if w.bulkActions >= 0 && w.service.NumberOfActions() >= w.bulkActions {
- return true
- }
- if w.bulkSize >= 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize) {
- return true
- }
- return false
-}
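As context for the bulk processor removed above, here is a minimal usage sketch built only from the API shown in this file (the BulkProcessor builder setters, Do, Add, Stats, Close). The client address, index name, and document are assumptions for illustration, not part of the original code.

package main

import (
	"context"
	"log"
	"time"

	elastic "github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient() // assumes Elasticsearch reachable at the default URL
	if err != nil {
		log.Fatal(err)
	}

	// Configure the builder, then start the running processor with Do.
	p, err := client.BulkProcessor().
		Name("example-processor").
		Workers(2).
		BulkActions(1000).               // commit after 1000 actions ...
		BulkSize(5 << 20).               // ... or after ~5 MB of request data
		FlushInterval(30 * time.Second). // ... or every 30 seconds
		Stats(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close() // commits outstanding requests and stops the workers

	// The caller sets index and type on every request added.
	doc := map[string]interface{}{"user": "olivere", "message": "bulk me"}
	p.Add(elastic.NewBulkIndexRequest().Index("tweets").Type("doc").Id("1").Doc(doc))

	stats := p.Stats() // available because Stats(true) was set above
	log.Printf("committed=%d indexed=%d failed=%d", stats.Committed, stats.Indexed, stats.Failed)
}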
diff --git a/vendor/github.com/olivere/elastic/bulk_processor_test.go b/vendor/github.com/olivere/elastic/bulk_processor_test.go
deleted file mode 100644
index bb97ca217..000000000
--- a/vendor/github.com/olivere/elastic/bulk_processor_test.go
+++ /dev/null
@@ -1,425 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "math/rand"
- "sync/atomic"
- "testing"
- "time"
-)
-
-func TestBulkProcessorDefaults(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- p := client.BulkProcessor()
- if p == nil {
- t.Fatalf("expected BulkProcessorService; got: %v", p)
- }
- if got, want := p.name, ""; got != want {
- t.Errorf("expected %q; got: %q", want, got)
- }
- if got, want := p.numWorkers, 1; got != want {
- t.Errorf("expected %d; got: %d", want, got)
- }
- if got, want := p.bulkActions, 1000; got != want {
- t.Errorf("expected %d; got: %d", want, got)
- }
- if got, want := p.bulkSize, 5*1024*1024; got != want {
- t.Errorf("expected %d; got: %d", want, got)
- }
- if got, want := p.flushInterval, time.Duration(0); got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := p.wantStats, false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if p.backoff == nil {
-		t.Fatalf("expected non-nil backoff; got: %v", p.backoff)
- }
-}
-
-func TestBulkProcessorCommitOnBulkActions(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- testBulkProcessor(t,
- 10000,
- client.BulkProcessor().
- Name("Actions-1").
- Workers(1).
- BulkActions(100).
- BulkSize(-1),
- )
-
- testBulkProcessor(t,
- 10000,
- client.BulkProcessor().
- Name("Actions-2").
- Workers(2).
- BulkActions(100).
- BulkSize(-1),
- )
-}
-
-func TestBulkProcessorCommitOnBulkSize(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- testBulkProcessor(t,
- 10000,
- client.BulkProcessor().
- Name("Size-1").
- Workers(1).
- BulkActions(-1).
- BulkSize(64*1024),
- )
-
- testBulkProcessor(t,
- 10000,
- client.BulkProcessor().
- Name("Size-2").
- Workers(2).
- BulkActions(-1).
- BulkSize(64*1024),
- )
-}
-
-func TestBulkProcessorBasedOnFlushInterval(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- var beforeRequests int64
- var befores int64
- var afters int64
- var failures int64
- var afterRequests int64
-
- beforeFn := func(executionId int64, requests []BulkableRequest) {
- atomic.AddInt64(&beforeRequests, int64(len(requests)))
- atomic.AddInt64(&befores, 1)
- }
- afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) {
- atomic.AddInt64(&afters, 1)
- if err != nil {
- atomic.AddInt64(&failures, 1)
- }
- atomic.AddInt64(&afterRequests, int64(len(requests)))
- }
-
- svc := client.BulkProcessor().
- Name("FlushInterval-1").
- Workers(2).
- BulkActions(-1).
- BulkSize(-1).
- FlushInterval(1 * time.Second).
- Before(beforeFn).
- After(afterFn)
-
- p, err := svc.Do(context.Background())
- if err != nil {
- t.Fatal(err)
- }
-
- const numDocs = 1000 // low-enough number that flush should be invoked
-
- for i := 1; i <= numDocs; i++ {
- tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
- request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
- p.Add(request)
- }
-
- // Should flush at least once
- time.Sleep(2 * time.Second)
-
- err = p.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- if p.stats.Flushed == 0 {
- t.Errorf("expected at least 1 flush; got: %d", p.stats.Flushed)
- }
- if got, want := beforeRequests, int64(numDocs); got != want {
- t.Errorf("expected %d requests to before callback; got: %d", want, got)
- }
- if got, want := afterRequests, int64(numDocs); got != want {
- t.Errorf("expected %d requests to after callback; got: %d", want, got)
- }
- if befores == 0 {
- t.Error("expected at least 1 call to before callback")
- }
- if afters == 0 {
- t.Error("expected at least 1 call to after callback")
- }
- if failures != 0 {
- t.Errorf("expected 0 calls to failure callback; got: %d", failures)
- }
-
- // Check number of documents that were bulk indexed
- _, err = p.c.Flush(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- count, err := p.c.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != int64(numDocs) {
- t.Fatalf("expected %d documents; got: %d", numDocs, count)
- }
-}
-
-func TestBulkProcessorClose(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- var beforeRequests int64
- var befores int64
- var afters int64
- var failures int64
- var afterRequests int64
-
- beforeFn := func(executionId int64, requests []BulkableRequest) {
- atomic.AddInt64(&beforeRequests, int64(len(requests)))
- atomic.AddInt64(&befores, 1)
- }
- afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) {
- atomic.AddInt64(&afters, 1)
- if err != nil {
- atomic.AddInt64(&failures, 1)
- }
- atomic.AddInt64(&afterRequests, int64(len(requests)))
- }
-
- p, err := client.BulkProcessor().
- Name("FlushInterval-1").
- Workers(2).
- BulkActions(-1).
- BulkSize(-1).
- FlushInterval(30 * time.Second). // 30 seconds to flush
- Before(beforeFn).After(afterFn).
- Do(context.Background())
- if err != nil {
- t.Fatal(err)
- }
-
-	const numDocs = 1000 // no periodic flush expected here; Close should commit the requests
-
- for i := 1; i <= numDocs; i++ {
- tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
- request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
- p.Add(request)
- }
-
- // Should not flush because 30s > 1s
- time.Sleep(1 * time.Second)
-
- // Close should flush
- err = p.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- if p.stats.Flushed != 0 {
- t.Errorf("expected no flush; got: %d", p.stats.Flushed)
- }
- if got, want := beforeRequests, int64(numDocs); got != want {
- t.Errorf("expected %d requests to before callback; got: %d", want, got)
- }
- if got, want := afterRequests, int64(numDocs); got != want {
- t.Errorf("expected %d requests to after callback; got: %d", want, got)
- }
- if befores == 0 {
- t.Error("expected at least 1 call to before callback")
- }
- if afters == 0 {
- t.Error("expected at least 1 call to after callback")
- }
- if failures != 0 {
- t.Errorf("expected 0 calls to failure callback; got: %d", failures)
- }
-
- // Check number of documents that were bulk indexed
- _, err = p.c.Flush(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- count, err := p.c.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != int64(numDocs) {
- t.Fatalf("expected %d documents; got: %d", numDocs, count)
- }
-}
-
-func TestBulkProcessorFlush(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- p, err := client.BulkProcessor().
- Name("ManualFlush").
- Workers(10).
- BulkActions(-1).
- BulkSize(-1).
- FlushInterval(30 * time.Second). // 30 seconds to flush
- Stats(true).
- Do(context.Background())
- if err != nil {
- t.Fatal(err)
- }
-
- const numDocs = 100
-
- for i := 1; i <= numDocs; i++ {
- tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
- request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
- p.Add(request)
- }
-
- // Should not flush because 30s > 1s
- time.Sleep(1 * time.Second)
-
- // No flush yet
- stats := p.Stats()
- if stats.Flushed != 0 {
- t.Errorf("expected no flush; got: %d", p.stats.Flushed)
- }
-
- // Manual flush
- err = p.Flush()
- if err != nil {
- t.Fatal(err)
- }
-
- time.Sleep(1 * time.Second)
-
- // Now flushed
- stats = p.Stats()
- if got, want := p.stats.Flushed, int64(1); got != want {
- t.Errorf("expected %d flush; got: %d", want, got)
- }
-
- // Close should not start another flush
- err = p.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- // Still 1 flush
- stats = p.Stats()
- if got, want := p.stats.Flushed, int64(1); got != want {
- t.Errorf("expected %d flush; got: %d", want, got)
- }
-
- // Check number of documents that were bulk indexed
- _, err = p.c.Flush(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- count, err := p.c.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != int64(numDocs) {
- t.Fatalf("expected %d documents; got: %d", numDocs, count)
- }
-}
-
-// -- Helper --
-
-func testBulkProcessor(t *testing.T, numDocs int, svc *BulkProcessorService) {
- var beforeRequests int64
- var befores int64
- var afters int64
- var failures int64
- var afterRequests int64
-
- beforeFn := func(executionId int64, requests []BulkableRequest) {
- atomic.AddInt64(&beforeRequests, int64(len(requests)))
- atomic.AddInt64(&befores, 1)
- }
- afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) {
- atomic.AddInt64(&afters, 1)
- if err != nil {
- atomic.AddInt64(&failures, 1)
- }
- atomic.AddInt64(&afterRequests, int64(len(requests)))
- }
-
- p, err := svc.Before(beforeFn).After(afterFn).Stats(true).Do(context.Background())
- if err != nil {
- t.Fatal(err)
- }
-
- for i := 1; i <= numDocs; i++ {
- tweet := tweet{User: "olivere", Message: fmt.Sprintf("%07d. %s", i, randomString(1+rand.Intn(63)))}
- request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
- p.Add(request)
- }
-
- err = p.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- stats := p.Stats()
-
- if stats.Flushed != 0 {
- t.Errorf("expected no flush; got: %d", stats.Flushed)
- }
- if stats.Committed <= 0 {
- t.Errorf("expected committed > %d; got: %d", 0, stats.Committed)
- }
- if got, want := stats.Indexed, int64(numDocs); got != want {
- t.Errorf("expected indexed = %d; got: %d", want, got)
- }
- if got, want := stats.Created, int64(0); got != want {
- t.Errorf("expected created = %d; got: %d", want, got)
- }
- if got, want := stats.Updated, int64(0); got != want {
- t.Errorf("expected updated = %d; got: %d", want, got)
- }
- if got, want := stats.Deleted, int64(0); got != want {
- t.Errorf("expected deleted = %d; got: %d", want, got)
- }
- if got, want := stats.Succeeded, int64(numDocs); got != want {
- t.Errorf("expected succeeded = %d; got: %d", want, got)
- }
- if got, want := stats.Failed, int64(0); got != want {
- t.Errorf("expected failed = %d; got: %d", want, got)
- }
- if got, want := beforeRequests, int64(numDocs); got != want {
- t.Errorf("expected %d requests to before callback; got: %d", want, got)
- }
- if got, want := afterRequests, int64(numDocs); got != want {
- t.Errorf("expected %d requests to after callback; got: %d", want, got)
- }
- if befores == 0 {
- t.Error("expected at least 1 call to before callback")
- }
- if afters == 0 {
- t.Error("expected at least 1 call to after callback")
- }
- if failures != 0 {
- t.Errorf("expected 0 calls to failure callback; got: %d", failures)
- }
-
- // Check number of documents that were bulk indexed
- _, err = p.c.Flush(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- count, err := p.c.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != int64(numDocs) {
- t.Fatalf("expected %d documents; got: %d", numDocs, count)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/bulk_request.go b/vendor/github.com/olivere/elastic/bulk_request.go
deleted file mode 100644
index ce3bf0768..000000000
--- a/vendor/github.com/olivere/elastic/bulk_request.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "fmt"
-)
-
-// -- Bulkable request (index/update/delete) --
-
-// BulkableRequest is a generic interface to bulkable requests.
-type BulkableRequest interface {
- fmt.Stringer
- Source() ([]string, error)
-}
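Any type with a String method and a Source method satisfies the interface above, so custom requests can be handed to the bulk services alongside the built-in index/update/delete requests. A hypothetical pass-through implementation, for illustration only:

package main

import (
	"fmt"
	"strings"

	elastic "github.com/olivere/elastic"
)

// rawLines is a hypothetical BulkableRequest that passes pre-built
// NDJSON lines through unchanged.
type rawLines []string

func (r rawLines) Source() ([]string, error) { return r, nil }
func (r rawLines) String() string            { return strings.Join(r, "\n") }

// Compile-time check that rawLines satisfies the interface defined above.
var _ elastic.BulkableRequest = rawLines{}

func main() {
	req := rawLines{`{"delete":{"_index":"index1","_type":"doc","_id":"1"}}`}
	fmt.Println(req)
}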
diff --git a/vendor/github.com/olivere/elastic/bulk_test.go b/vendor/github.com/olivere/elastic/bulk_test.go
deleted file mode 100644
index f31ed6613..000000000
--- a/vendor/github.com/olivere/elastic/bulk_test.go
+++ /dev/null
@@ -1,600 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "math/rand"
- "net/http"
- "net/http/httptest"
- "testing"
-)
-
-func TestBulk(t *testing.T) {
- client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
-
- index1Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet1)
- index2Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("2").Doc(tweet2)
- delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
-
- bulkRequest := client.Bulk()
- bulkRequest = bulkRequest.Add(index1Req)
- bulkRequest = bulkRequest.Add(index2Req)
- bulkRequest = bulkRequest.Add(delete1Req)
-
- if bulkRequest.NumberOfActions() != 3 {
- t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
- }
-
- bulkResponse, err := bulkRequest.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if bulkResponse == nil {
- t.Errorf("expected bulkResponse to be != nil; got nil")
- }
-
- if bulkRequest.NumberOfActions() != 0 {
- t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
- }
-
- // Document with Id="1" should not exist
- exists, err := client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if exists {
- t.Errorf("expected exists %v; got %v", false, exists)
- }
-
- // Document with Id="2" should exist
- exists, err = client.Exists().Index(testIndexName).Type("doc").Id("2").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !exists {
- t.Errorf("expected exists %v; got %v", true, exists)
- }
-
- // Update
- updateDoc := struct {
- Retweets int `json:"retweets"`
- }{
- 42,
- }
- update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("2").Doc(&updateDoc)
- bulkRequest = client.Bulk()
- bulkRequest = bulkRequest.Add(update1Req)
-
- if bulkRequest.NumberOfActions() != 1 {
- t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions())
- }
-
- bulkResponse, err = bulkRequest.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if bulkResponse == nil {
- t.Errorf("expected bulkResponse to be != nil; got nil")
- }
-
- if bulkRequest.NumberOfActions() != 0 {
- t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
- }
-
-	// Document with Id="2" should now have a retweets count of 42
- doc, err := client.Get().Index(testIndexName).Type("doc").Id("2").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if doc == nil {
- t.Fatal("expected doc to be != nil; got nil")
- }
- if !doc.Found {
- t.Fatalf("expected doc to be found; got found = %v", doc.Found)
- }
- if doc.Source == nil {
- t.Fatal("expected doc source to be != nil; got nil")
- }
- var updatedTweet tweet
- err = json.Unmarshal(*doc.Source, &updatedTweet)
- if err != nil {
- t.Fatal(err)
- }
- if updatedTweet.Retweets != 42 {
- t.Errorf("expected updated tweet retweets = %v; got %v", 42, updatedTweet.Retweets)
- }
-
- // Update with script
- update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("2").
- RetryOnConflict(3).
- Script(NewScript("ctx._source.retweets += params.v").Param("v", 1))
- bulkRequest = client.Bulk()
- bulkRequest = bulkRequest.Add(update2Req)
- if bulkRequest.NumberOfActions() != 1 {
- t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions())
- }
- bulkResponse, err = bulkRequest.Refresh("wait_for").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if bulkResponse == nil {
- t.Errorf("expected bulkResponse to be != nil; got nil")
- }
-
- if bulkRequest.NumberOfActions() != 0 {
- t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
- }
-
-	// Document with Id="2" should now have a retweets count of 43
- doc, err = client.Get().Index(testIndexName).Type("doc").Id("2").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if doc == nil {
- t.Fatal("expected doc to be != nil; got nil")
- }
- if !doc.Found {
- t.Fatalf("expected doc to be found; got found = %v", doc.Found)
- }
- if doc.Source == nil {
- t.Fatal("expected doc source to be != nil; got nil")
- }
- err = json.Unmarshal(*doc.Source, &updatedTweet)
- if err != nil {
- t.Fatal(err)
- }
- if updatedTweet.Retweets != 43 {
- t.Errorf("expected updated tweet retweets = %v; got %v", 43, updatedTweet.Retweets)
- }
-}
-
-func TestBulkWithIndexSetOnClient(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
-
- index1Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet1).Routing("1")
- index2Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("2").Doc(tweet2)
- delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
-
- bulkRequest := client.Bulk().Index(testIndexName).Type("doc")
- bulkRequest = bulkRequest.Add(index1Req)
- bulkRequest = bulkRequest.Add(index2Req)
- bulkRequest = bulkRequest.Add(delete1Req)
-
- if bulkRequest.NumberOfActions() != 3 {
- t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
- }
-
- bulkResponse, err := bulkRequest.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if bulkResponse == nil {
- t.Errorf("expected bulkResponse to be != nil; got nil")
- }
-
- // Document with Id="1" should not exist
- exists, err := client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if exists {
- t.Errorf("expected exists %v; got %v", false, exists)
- }
-
- // Document with Id="2" should exist
- exists, err = client.Exists().Index(testIndexName).Type("doc").Id("2").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !exists {
- t.Errorf("expected exists %v; got %v", true, exists)
- }
-}
-
-func TestBulkIndexDeleteUpdate(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
- //client := setupTestClientAndCreateIndexAndLog(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
-
- index1Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet1)
- index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("doc").Id("2").Doc(tweet2)
- delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
- update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("2").
- ReturnSource(true).
- Doc(struct {
- Retweets int `json:"retweets"`
- }{
- Retweets: 42,
- })
-
- bulkRequest := client.Bulk()
- bulkRequest = bulkRequest.Add(index1Req)
- bulkRequest = bulkRequest.Add(index2Req)
- bulkRequest = bulkRequest.Add(delete1Req)
- bulkRequest = bulkRequest.Add(update2Req)
-
- if bulkRequest.NumberOfActions() != 4 {
- t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions())
- }
-
- expected := `{"index":{"_index":"` + testIndexName + `","_id":"1","_type":"doc"}}
-{"user":"olivere","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"}
-{"create":{"_index":"` + testIndexName + `","_id":"2","_type":"doc"}}
-{"user":"sandrae","message":"Dancing all night long. Yeah.","retweets":0,"created":"0001-01-01T00:00:00Z"}
-{"delete":{"_index":"` + testIndexName + `","_type":"doc","_id":"1"}}
-{"update":{"_index":"` + testIndexName + `","_type":"doc","_id":"2"}}
-{"doc":{"retweets":42},"_source":true}
-`
- got, err := bulkRequest.bodyAsString()
- if err != nil {
- t.Fatalf("expected no error, got: %v", err)
- }
- if got != expected {
- t.Errorf("expected\n%s\ngot:\n%s", expected, got)
- }
-
- // Run the bulk request
- bulkResponse, err := bulkRequest.Pretty(true).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if bulkResponse == nil {
- t.Errorf("expected bulkResponse to be != nil; got nil")
- }
- if bulkResponse.Took == 0 {
- t.Errorf("expected took to be > 0; got %d", bulkResponse.Took)
- }
- if bulkResponse.Errors {
- t.Errorf("expected errors to be %v; got %v", false, bulkResponse.Errors)
- }
- if len(bulkResponse.Items) != 4 {
- t.Fatalf("expected 4 result items; got %d", len(bulkResponse.Items))
- }
-
- // Indexed actions
- indexed := bulkResponse.Indexed()
- if indexed == nil {
- t.Fatal("expected indexed to be != nil; got nil")
- }
- if len(indexed) != 1 {
- t.Fatalf("expected len(indexed) == %d; got %d", 1, len(indexed))
- }
- if indexed[0].Id != "1" {
- t.Errorf("expected indexed[0].Id == %s; got %s", "1", indexed[0].Id)
- }
- if indexed[0].Status != 201 {
- t.Errorf("expected indexed[0].Status == %d; got %d", 201, indexed[0].Status)
- }
-
- // Created actions
- created := bulkResponse.Created()
- if created == nil {
- t.Fatal("expected created to be != nil; got nil")
- }
- if len(created) != 1 {
- t.Fatalf("expected len(created) == %d; got %d", 1, len(created))
- }
- if created[0].Id != "2" {
- t.Errorf("expected created[0].Id == %s; got %s", "2", created[0].Id)
- }
- if created[0].Status != 201 {
- t.Errorf("expected created[0].Status == %d; got %d", 201, created[0].Status)
- }
- if want, have := "created", created[0].Result; want != have {
- t.Errorf("expected created[0].Result == %q; got %q", want, have)
- }
-
- // Deleted actions
- deleted := bulkResponse.Deleted()
- if deleted == nil {
- t.Fatal("expected deleted to be != nil; got nil")
- }
- if len(deleted) != 1 {
- t.Fatalf("expected len(deleted) == %d; got %d", 1, len(deleted))
- }
- if deleted[0].Id != "1" {
- t.Errorf("expected deleted[0].Id == %s; got %s", "1", deleted[0].Id)
- }
- if deleted[0].Status != 200 {
- t.Errorf("expected deleted[0].Status == %d; got %d", 200, deleted[0].Status)
- }
- if want, have := "deleted", deleted[0].Result; want != have {
- t.Errorf("expected deleted[0].Result == %q; got %q", want, have)
- }
-
- // Updated actions
- updated := bulkResponse.Updated()
- if updated == nil {
- t.Fatal("expected updated to be != nil; got nil")
- }
- if len(updated) != 1 {
- t.Fatalf("expected len(updated) == %d; got %d", 1, len(updated))
- }
- if updated[0].Id != "2" {
- t.Errorf("expected updated[0].Id == %s; got %s", "2", updated[0].Id)
- }
- if updated[0].Status != 200 {
- t.Errorf("expected updated[0].Status == %d; got %d", 200, updated[0].Status)
- }
- if updated[0].Version != 2 {
- t.Errorf("expected updated[0].Version == %d; got %d", 2, updated[0].Version)
- }
- if want, have := "updated", updated[0].Result; want != have {
- t.Errorf("expected updated[0].Result == %q; got %q", want, have)
- }
- if updated[0].GetResult == nil {
- t.Fatalf("expected updated[0].GetResult to be != nil; got nil")
- }
- if updated[0].GetResult.Source == nil {
- t.Fatalf("expected updated[0].GetResult.Source to be != nil; got nil")
- }
- if want, have := true, updated[0].GetResult.Found; want != have {
- t.Fatalf("expected updated[0].GetResult.Found to be != %v; got %v", want, have)
- }
- var doc tweet
- if err := json.Unmarshal(*updated[0].GetResult.Source, &doc); err != nil {
- t.Fatalf("expected to unmarshal updated[0].GetResult.Source; got %v", err)
- }
- if want, have := 42, doc.Retweets; want != have {
- t.Fatalf("expected updated tweet to have Retweets = %v; got %v", want, have)
- }
-
- // Succeeded actions
- succeeded := bulkResponse.Succeeded()
- if succeeded == nil {
- t.Fatal("expected succeeded to be != nil; got nil")
- }
- if len(succeeded) != 4 {
- t.Fatalf("expected len(succeeded) == %d; got %d", 4, len(succeeded))
- }
-
- // ById
- id1Results := bulkResponse.ById("1")
- if id1Results == nil {
- t.Fatal("expected id1Results to be != nil; got nil")
- }
- if len(id1Results) != 2 {
- t.Fatalf("expected len(id1Results) == %d; got %d", 2, len(id1Results))
- }
- if id1Results[0].Id != "1" {
- t.Errorf("expected id1Results[0].Id == %s; got %s", "1", id1Results[0].Id)
- }
- if id1Results[0].Status != 201 {
- t.Errorf("expected id1Results[0].Status == %d; got %d", 201, id1Results[0].Status)
- }
- if id1Results[0].Version != 1 {
- t.Errorf("expected id1Results[0].Version == %d; got %d", 1, id1Results[0].Version)
- }
- if id1Results[1].Id != "1" {
- t.Errorf("expected id1Results[1].Id == %s; got %s", "1", id1Results[1].Id)
- }
- if id1Results[1].Status != 200 {
- t.Errorf("expected id1Results[1].Status == %d; got %d", 200, id1Results[1].Status)
- }
- if id1Results[1].Version != 2 {
- t.Errorf("expected id1Results[1].Version == %d; got %d", 2, id1Results[1].Version)
- }
-}
-
-func TestFailedBulkRequests(t *testing.T) {
- js := `{
- "took" : 2,
- "errors" : true,
- "items" : [ {
- "index" : {
- "_index" : "elastic-test",
- "_type" : "doc",
- "_id" : "1",
- "_version" : 1,
- "status" : 201
- }
- }, {
- "create" : {
- "_index" : "elastic-test",
- "_type" : "doc",
- "_id" : "2",
- "_version" : 1,
- "status" : 423,
- "error" : {
- "type":"routing_missing_exception",
- "reason":"routing is required for [elastic-test2]/[comment]/[1]"
- }
- }
- }, {
- "delete" : {
- "_index" : "elastic-test",
- "_type" : "doc",
- "_id" : "1",
- "_version" : 2,
- "status" : 404,
- "found" : false
- }
- }, {
- "update" : {
- "_index" : "elastic-test",
- "_type" : "doc",
- "_id" : "2",
- "_version" : 2,
- "status" : 200
- }
- } ]
-}`
-
- var resp BulkResponse
- err := json.Unmarshal([]byte(js), &resp)
- if err != nil {
- t.Fatal(err)
- }
- failed := resp.Failed()
- if len(failed) != 2 {
- t.Errorf("expected %d failed items; got: %d", 2, len(failed))
- }
-}
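
A short, hypothetical sketch of how a caller might act on BulkResponse.Failed(), which (as the test above exercises) collects every item whose status falls outside the 2xx range. The helper name, package name, and logging are illustrative only, not part of the library:

package bulkexample

import (
	"log"

	"github.com/olivere/elastic"
)

// failedIDs returns the ids of all failed bulk items so the caller can
// decide whether to retry them. Sketch only, assuming resp came from
// BulkService.Do.
func failedIDs(resp *elastic.BulkResponse) []string {
	var ids []string
	for _, item := range resp.Failed() {
		if item.Error != nil {
			// Error carries the Elasticsearch error type and reason,
			// e.g. "routing_missing_exception".
			log.Printf("bulk item %q failed (status %d): %s", item.Id, item.Status, item.Error.Reason)
		}
		ids = append(ids, item.Id)
	}
	return ids
}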
-
-func TestBulkEstimatedSizeInBytes(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
-
- index1Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet1)
- index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("doc").Id("2").Doc(tweet2)
- delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
- update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("2").
- Doc(struct {
- Retweets int `json:"retweets"`
- }{
- Retweets: 42,
- })
-
- bulkRequest := client.Bulk()
- bulkRequest = bulkRequest.Add(index1Req)
- bulkRequest = bulkRequest.Add(index2Req)
- bulkRequest = bulkRequest.Add(delete1Req)
- bulkRequest = bulkRequest.Add(update2Req)
-
- if bulkRequest.NumberOfActions() != 4 {
- t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions())
- }
-
-	// The estimated size of the bulk request in bytes must be at least
-	// the length of the request body.
- raw, err := bulkRequest.bodyAsString()
- if err != nil {
- t.Fatal(err)
- }
- rawlen := int64(len([]byte(raw)))
-
- if got, want := bulkRequest.EstimatedSizeInBytes(), rawlen; got < want {
- t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got)
- }
-
- // Reset should also reset the calculated estimated byte size
- bulkRequest.reset()
-
- if got, want := bulkRequest.EstimatedSizeInBytes(), int64(0); got != want {
- t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got)
- }
-}
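
The estimate tested above is what a size-based flush policy would key off. A minimal sketch, assuming a long-lived *elastic.BulkService and a caller-chosen threshold (both hypothetical here), of committing the batch once the accumulated payload grows too large:

package bulkexample

import (
	"context"

	"github.com/olivere/elastic"
)

// addAndMaybeFlush queues one request and commits the batch once the
// estimated payload exceeds maxBytes. Sketch only; the threshold and the
// error handling are up to the caller.
func addAndMaybeFlush(ctx context.Context, bulk *elastic.BulkService, req elastic.BulkableRequest, maxBytes int64) error {
	bulk = bulk.Add(req)
	if bulk.EstimatedSizeInBytes() < maxBytes {
		return nil
	}
	// Do sends the accumulated actions and resets the service, so both
	// NumberOfActions and the size estimate drop back to zero.
	_, err := bulk.Do(ctx)
	return err
}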
-
-func TestBulkEstimateSizeInBytesLength(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
- s := client.Bulk()
- r := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
- s = s.Add(r)
- if got, want := s.estimateSizeInBytes(r), int64(1+len(r.String())); got != want {
- t.Fatalf("expected %d; got: %d", want, got)
- }
-}
-
-func TestBulkContentType(t *testing.T) {
- var header http.Header
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- header = r.Header
- fmt.Fprintln(w, `{}`)
- }))
- defer ts.Close()
-
- client, err := NewSimpleClient(SetURL(ts.URL))
- if err != nil {
- t.Fatal(err)
- }
- indexReq := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."})
- if _, err := client.Bulk().Add(indexReq).Do(context.Background()); err != nil {
- t.Fatal(err)
- }
- if header == nil {
- t.Fatalf("expected header, got %v", header)
- }
- if want, have := "application/x-ndjson", header.Get("Content-Type"); want != have {
- t.Fatalf("Content-Type: want %q, have %q", want, have)
- }
-}
-
-// -- Benchmarks --
-
-var benchmarkBulkEstimatedSizeInBytes int64
-
-func BenchmarkBulkEstimatedSizeInBytesWith1Request(b *testing.B) {
- client := setupTestClientAndCreateIndex(b)
- s := client.Bulk()
- var result int64
- for n := 0; n < b.N; n++ {
- s = s.Add(NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(struct{ A string }{"1"}))
- s = s.Add(NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("1").Doc(struct{ A string }{"2"}))
- s = s.Add(NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1"))
- result = s.EstimatedSizeInBytes()
- s.reset()
- }
- b.ReportAllocs()
- benchmarkBulkEstimatedSizeInBytes = result // ensure the compiler doesn't optimize
-}
-
-func BenchmarkBulkEstimatedSizeInBytesWith100Requests(b *testing.B) {
- client := setupTestClientAndCreateIndex(b)
- s := client.Bulk()
- var result int64
- for n := 0; n < b.N; n++ {
- for i := 0; i < 100; i++ {
- s = s.Add(NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(struct{ A string }{"1"}))
- s = s.Add(NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("1").Doc(struct{ A string }{"2"}))
- s = s.Add(NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1"))
- }
- result = s.EstimatedSizeInBytes()
- s.reset()
- }
- b.ReportAllocs()
- benchmarkBulkEstimatedSizeInBytes = result // ensure the compiler doesn't optimize
-}
-
-func BenchmarkBulkAllocs(b *testing.B) {
- b.Run("1000 docs with 64 byte", func(b *testing.B) { benchmarkBulkAllocs(b, 64, 1000) })
- b.Run("1000 docs with 1 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 1024, 1000) })
- b.Run("1000 docs with 4 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 4096, 1000) })
- b.Run("1000 docs with 16 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 16*1024, 1000) })
- b.Run("1000 docs with 64 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 64*1024, 1000) })
- b.Run("1000 docs with 256 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 256*1024, 1000) })
- b.Run("1000 docs with 1 MiB", func(b *testing.B) { benchmarkBulkAllocs(b, 1024*1024, 1000) })
-}
-
-const (
- charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-"
-)
-
-func benchmarkBulkAllocs(b *testing.B, size, num int) {
- buf := make([]byte, size)
- for i := range buf {
- buf[i] = charset[rand.Intn(len(charset))]
- }
-
- s := &BulkService{}
- n := 0
- for {
- n++
- s = s.Add(NewBulkIndexRequest().Index("test").Type("doc").Id("1").Doc(struct {
- S string `json:"s"`
- }{
- S: string(buf),
- }))
- if n >= num {
- break
- }
- }
- for i := 0; i < b.N; i++ {
- s.bodyAsString()
- }
- b.ReportAllocs()
-}
diff --git a/vendor/github.com/olivere/elastic/bulk_update_request.go b/vendor/github.com/olivere/elastic/bulk_update_request.go
deleted file mode 100644
index 50e5adb8f..000000000
--- a/vendor/github.com/olivere/elastic/bulk_update_request.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-//go:generate easyjson bulk_update_request.go
-
-import (
- "encoding/json"
- "fmt"
- "strings"
-)
-
-// BulkUpdateRequest is a request to update a document in Elasticsearch.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
-// for details.
-type BulkUpdateRequest struct {
- BulkableRequest
- index string
- typ string
- id string
-
- routing string
- parent string
- script *Script
- scriptedUpsert *bool
- version int64 // default is MATCH_ANY
- versionType string // default is "internal"
- retryOnConflict *int
- upsert interface{}
- docAsUpsert *bool
- detectNoop *bool
- doc interface{}
- returnSource *bool
-
- source []string
-
- useEasyJSON bool
-}
-
-//easyjson:json
-type bulkUpdateRequestCommand map[string]bulkUpdateRequestCommandOp
-
-//easyjson:json
-type bulkUpdateRequestCommandOp struct {
- Index string `json:"_index,omitempty"`
- Type string `json:"_type,omitempty"`
- Id string `json:"_id,omitempty"`
- Parent string `json:"parent,omitempty"`
- // RetryOnConflict is "_retry_on_conflict" for 6.0 and "retry_on_conflict" for 6.1+.
- RetryOnConflict *int `json:"retry_on_conflict,omitempty"`
- Routing string `json:"routing,omitempty"`
- Version int64 `json:"version,omitempty"`
- VersionType string `json:"version_type,omitempty"`
-}
-
-//easyjson:json
-type bulkUpdateRequestCommandData struct {
- DetectNoop *bool `json:"detect_noop,omitempty"`
- Doc interface{} `json:"doc,omitempty"`
- DocAsUpsert *bool `json:"doc_as_upsert,omitempty"`
- Script interface{} `json:"script,omitempty"`
- ScriptedUpsert *bool `json:"scripted_upsert,omitempty"`
- Upsert interface{} `json:"upsert,omitempty"`
- Source *bool `json:"_source,omitempty"`
-}
-
-// NewBulkUpdateRequest returns a new BulkUpdateRequest.
-func NewBulkUpdateRequest() *BulkUpdateRequest {
- return &BulkUpdateRequest{}
-}
-
-// UseEasyJSON is an experimental setting that enables serialization
-// with github.com/mailru/easyjson, which should result in faster
-// serialization and fewer allocations, but gives up compatibility with
-// encoding/json, relies on package unsafe, etc. See
-// https://github.com/mailru/easyjson#issues-notes-and-limitations
-// for details. This setting is disabled by default.
-func (r *BulkUpdateRequest) UseEasyJSON(enable bool) *BulkUpdateRequest {
- r.useEasyJSON = enable
- return r
-}
-
-// Index specifies the Elasticsearch index to use for this update request.
-// If unspecified, the index set on the BulkService will be used.
-func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest {
- r.index = index
- r.source = nil
- return r
-}
-
-// Type specifies the Elasticsearch type to use for this update request.
-// If unspecified, the type set on the BulkService will be used.
-func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest {
- r.typ = typ
- r.source = nil
- return r
-}
-
-// Id specifies the identifier of the document to update.
-func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest {
- r.id = id
- r.source = nil
- return r
-}
-
-// Routing specifies a routing value for the request.
-func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest {
- r.routing = routing
- r.source = nil
- return r
-}
-
-// Parent specifies the identifier of the parent document (if available).
-func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
- r.parent = parent
- r.source = nil
- return r
-}
-
-// Script specifies an update script.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html#bulk-update
-// and https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-scripting.html
-// for details.
-func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest {
- r.script = script
- r.source = nil
- return r
-}
-
-// ScriptedUpsert specifies if your script will run regardless of
-// whether the document exists or not.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-update.html#_literal_scripted_upsert_literal
-func (r *BulkUpdateRequest) ScriptedUpsert(upsert bool) *BulkUpdateRequest {
- r.scriptedUpsert = &upsert
- r.source = nil
- return r
-}
-
-// RetryOnConflict specifies how often to retry in case of a version conflict.
-func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest {
- r.retryOnConflict = &retryOnConflict
- r.source = nil
- return r
-}
-
-// Version indicates the version of the document as part of an optimistic
-// concurrency model.
-func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest {
- r.version = version
- r.source = nil
- return r
-}
-
-// VersionType can be "internal" (default), "external", "external_gte",
-// or "external_gt".
-func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
- r.versionType = versionType
- r.source = nil
- return r
-}
-
-// Doc specifies the updated document.
-func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
- r.doc = doc
- r.source = nil
- return r
-}
-
-// DocAsUpsert indicates whether the contents of Doc should be used as
-// the Upsert value.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-update.html#_literal_doc_as_upsert_literal
-// for details.
-func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
- r.docAsUpsert = &docAsUpsert
- r.source = nil
- return r
-}
-
-// DetectNoop specifies whether changes that don't affect the document
-// should be ignored (true) or applied anyway (false). This is enabled by
-// default in Elasticsearch.
-func (r *BulkUpdateRequest) DetectNoop(detectNoop bool) *BulkUpdateRequest {
- r.detectNoop = &detectNoop
- r.source = nil
- return r
-}
-
-// Upsert specifies the document to use for upserts. It will be used for
-// create if the original document does not exist.
-func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
- r.upsert = doc
- r.source = nil
- return r
-}
-
-// ReturnSource specifies whether Elasticsearch should return the source
-// after the update. In the request, this corresponds to the `_source` field.
-// It is false by default.
-func (r *BulkUpdateRequest) ReturnSource(source bool) *BulkUpdateRequest {
- r.returnSource = &source
- r.source = nil
- return r
-}
-
-// String returns the on-wire representation of the update request,
-// concatenated as a single string.
-func (r *BulkUpdateRequest) String() string {
- lines, err := r.Source()
- if err != nil {
- return fmt.Sprintf("error: %v", err)
- }
- return strings.Join(lines, "\n")
-}
-
-// Source returns the on-wire representation of the update request,
-// split into an action-and-meta-data line and an (optional) source line.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
-// for details.
-func (r *BulkUpdateRequest) Source() ([]string, error) {
- // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
- // { "doc" : { "field1" : "value1", ... } }
- // or
- // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
- // { "script" : { ... } }
-
- if r.source != nil {
- return r.source, nil
- }
-
- lines := make([]string, 2)
-
- // "update" ...
- updateCommand := bulkUpdateRequestCommandOp{
- Index: r.index,
- Type: r.typ,
- Id: r.id,
- Routing: r.routing,
- Parent: r.parent,
- Version: r.version,
- VersionType: r.versionType,
- RetryOnConflict: r.retryOnConflict,
- }
- command := bulkUpdateRequestCommand{
- "update": updateCommand,
- }
-
- var err error
- var body []byte
- if r.useEasyJSON {
- // easyjson
- body, err = command.MarshalJSON()
- } else {
- // encoding/json
- body, err = json.Marshal(command)
- }
- if err != nil {
- return nil, err
- }
-
- lines[0] = string(body)
-
- // 2nd line: {"doc" : { ... }} or {"script": {...}}
- data := bulkUpdateRequestCommandData{
- DocAsUpsert: r.docAsUpsert,
- DetectNoop: r.detectNoop,
- Upsert: r.upsert,
- ScriptedUpsert: r.scriptedUpsert,
- Doc: r.doc,
- Source: r.returnSource,
- }
- if r.script != nil {
- script, err := r.script.Source()
- if err != nil {
- return nil, err
- }
- data.Script = script
- }
-
- if r.useEasyJSON {
- // easyjson
- body, err = data.MarshalJSON()
- } else {
- // encoding/json
- body, err = json.Marshal(data)
- }
- if err != nil {
- return nil, err
- }
-
- lines[1] = string(body)
-
- r.source = lines
- return lines, nil
-}
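
As a hedged illustration of the two lines Source produces for a plain partial-document update (the values are arbitrary and mirror the serialization tests further below; the package and function names are placeholders):

package bulkexample

import (
	"fmt"

	"github.com/olivere/elastic"
)

// printUpdateSource prints the action-and-meta-data line followed by the
// payload line, i.e. exactly what ends up in the NDJSON bulk body.
func printUpdateSource() error {
	req := elastic.NewBulkUpdateRequest().
		Index("index1").Type("doc").Id("1").
		RetryOnConflict(3).
		Doc(struct {
			Counter int64 `json:"counter"`
		}{Counter: 42})
	lines, err := req.Source()
	if err != nil {
		return err
	}
	fmt.Println(lines[0]) // {"update":{"_index":"index1","_type":"doc","_id":"1","retry_on_conflict":3}}
	fmt.Println(lines[1]) // {"doc":{"counter":42}}
	return nil
}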
diff --git a/vendor/github.com/olivere/elastic/bulk_update_request_easyjson.go b/vendor/github.com/olivere/elastic/bulk_update_request_easyjson.go
deleted file mode 100644
index d2c2cbfc7..000000000
--- a/vendor/github.com/olivere/elastic/bulk_update_request_easyjson.go
+++ /dev/null
@@ -1,461 +0,0 @@
-// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
-
-package elastic
-
-import (
- json "encoding/json"
- easyjson "github.com/mailru/easyjson"
- jlexer "github.com/mailru/easyjson/jlexer"
- jwriter "github.com/mailru/easyjson/jwriter"
-)
-
-// suppress unused package warning
-var (
- _ *json.RawMessage
- _ *jlexer.Lexer
- _ *jwriter.Writer
- _ easyjson.Marshaler
-)
-
-func easyjson1ed00e60DecodeGithubComOlivereElastic(in *jlexer.Lexer, out *bulkUpdateRequestCommandOp) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeString()
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "_index":
- out.Index = string(in.String())
- case "_type":
- out.Type = string(in.String())
- case "_id":
- out.Id = string(in.String())
- case "parent":
- out.Parent = string(in.String())
- case "retry_on_conflict":
- if in.IsNull() {
- in.Skip()
- out.RetryOnConflict = nil
- } else {
- if out.RetryOnConflict == nil {
- out.RetryOnConflict = new(int)
- }
- *out.RetryOnConflict = int(in.Int())
- }
- case "routing":
- out.Routing = string(in.String())
- case "version":
- out.Version = int64(in.Int64())
- case "version_type":
- out.VersionType = string(in.String())
- default:
- in.SkipRecursive()
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-func easyjson1ed00e60EncodeGithubComOlivereElastic(out *jwriter.Writer, in bulkUpdateRequestCommandOp) {
- out.RawByte('{')
- first := true
- _ = first
- if in.Index != "" {
- const prefix string = ",\"_index\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Index))
- }
- if in.Type != "" {
- const prefix string = ",\"_type\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Type))
- }
- if in.Id != "" {
- const prefix string = ",\"_id\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Id))
- }
- if in.Parent != "" {
- const prefix string = ",\"parent\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Parent))
- }
- if in.RetryOnConflict != nil {
- const prefix string = ",\"retry_on_conflict\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.Int(int(*in.RetryOnConflict))
- }
- if in.Routing != "" {
- const prefix string = ",\"routing\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.Routing))
- }
- if in.Version != 0 {
- const prefix string = ",\"version\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.Int64(int64(in.Version))
- }
- if in.VersionType != "" {
- const prefix string = ",\"version_type\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.String(string(in.VersionType))
- }
- out.RawByte('}')
-}
-
-// MarshalJSON supports json.Marshaler interface
-func (v bulkUpdateRequestCommandOp) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- easyjson1ed00e60EncodeGithubComOlivereElastic(&w, v)
- return w.Buffer.BuildBytes(), w.Error
-}
-
-// MarshalEasyJSON supports easyjson.Marshaler interface
-func (v bulkUpdateRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) {
- easyjson1ed00e60EncodeGithubComOlivereElastic(w, v)
-}
-
-// UnmarshalJSON supports json.Unmarshaler interface
-func (v *bulkUpdateRequestCommandOp) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- easyjson1ed00e60DecodeGithubComOlivereElastic(&r, v)
- return r.Error()
-}
-
-// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
-func (v *bulkUpdateRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) {
- easyjson1ed00e60DecodeGithubComOlivereElastic(l, v)
-}
-func easyjson1ed00e60DecodeGithubComOlivereElastic1(in *jlexer.Lexer, out *bulkUpdateRequestCommandData) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeString()
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "detect_noop":
- if in.IsNull() {
- in.Skip()
- out.DetectNoop = nil
- } else {
- if out.DetectNoop == nil {
- out.DetectNoop = new(bool)
- }
- *out.DetectNoop = bool(in.Bool())
- }
- case "doc":
- if m, ok := out.Doc.(easyjson.Unmarshaler); ok {
- m.UnmarshalEasyJSON(in)
- } else if m, ok := out.Doc.(json.Unmarshaler); ok {
- _ = m.UnmarshalJSON(in.Raw())
- } else {
- out.Doc = in.Interface()
- }
- case "doc_as_upsert":
- if in.IsNull() {
- in.Skip()
- out.DocAsUpsert = nil
- } else {
- if out.DocAsUpsert == nil {
- out.DocAsUpsert = new(bool)
- }
- *out.DocAsUpsert = bool(in.Bool())
- }
- case "script":
- if m, ok := out.Script.(easyjson.Unmarshaler); ok {
- m.UnmarshalEasyJSON(in)
- } else if m, ok := out.Script.(json.Unmarshaler); ok {
- _ = m.UnmarshalJSON(in.Raw())
- } else {
- out.Script = in.Interface()
- }
- case "scripted_upsert":
- if in.IsNull() {
- in.Skip()
- out.ScriptedUpsert = nil
- } else {
- if out.ScriptedUpsert == nil {
- out.ScriptedUpsert = new(bool)
- }
- *out.ScriptedUpsert = bool(in.Bool())
- }
- case "upsert":
- if m, ok := out.Upsert.(easyjson.Unmarshaler); ok {
- m.UnmarshalEasyJSON(in)
- } else if m, ok := out.Upsert.(json.Unmarshaler); ok {
- _ = m.UnmarshalJSON(in.Raw())
- } else {
- out.Upsert = in.Interface()
- }
- case "_source":
- if in.IsNull() {
- in.Skip()
- out.Source = nil
- } else {
- if out.Source == nil {
- out.Source = new(bool)
- }
- *out.Source = bool(in.Bool())
- }
- default:
- in.SkipRecursive()
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-func easyjson1ed00e60EncodeGithubComOlivereElastic1(out *jwriter.Writer, in bulkUpdateRequestCommandData) {
- out.RawByte('{')
- first := true
- _ = first
- if in.DetectNoop != nil {
- const prefix string = ",\"detect_noop\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.Bool(bool(*in.DetectNoop))
- }
- if in.Doc != nil {
- const prefix string = ",\"doc\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- if m, ok := in.Doc.(easyjson.Marshaler); ok {
- m.MarshalEasyJSON(out)
- } else if m, ok := in.Doc.(json.Marshaler); ok {
- out.Raw(m.MarshalJSON())
- } else {
- out.Raw(json.Marshal(in.Doc))
- }
- }
- if in.DocAsUpsert != nil {
- const prefix string = ",\"doc_as_upsert\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.Bool(bool(*in.DocAsUpsert))
- }
- if in.Script != nil {
- const prefix string = ",\"script\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- if m, ok := in.Script.(easyjson.Marshaler); ok {
- m.MarshalEasyJSON(out)
- } else if m, ok := in.Script.(json.Marshaler); ok {
- out.Raw(m.MarshalJSON())
- } else {
- out.Raw(json.Marshal(in.Script))
- }
- }
- if in.ScriptedUpsert != nil {
- const prefix string = ",\"scripted_upsert\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.Bool(bool(*in.ScriptedUpsert))
- }
- if in.Upsert != nil {
- const prefix string = ",\"upsert\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- if m, ok := in.Upsert.(easyjson.Marshaler); ok {
- m.MarshalEasyJSON(out)
- } else if m, ok := in.Upsert.(json.Marshaler); ok {
- out.Raw(m.MarshalJSON())
- } else {
- out.Raw(json.Marshal(in.Upsert))
- }
- }
- if in.Source != nil {
- const prefix string = ",\"_source\":"
- if first {
- first = false
- out.RawString(prefix[1:])
- } else {
- out.RawString(prefix)
- }
- out.Bool(bool(*in.Source))
- }
- out.RawByte('}')
-}
-
-// MarshalJSON supports json.Marshaler interface
-func (v bulkUpdateRequestCommandData) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- easyjson1ed00e60EncodeGithubComOlivereElastic1(&w, v)
- return w.Buffer.BuildBytes(), w.Error
-}
-
-// MarshalEasyJSON supports easyjson.Marshaler interface
-func (v bulkUpdateRequestCommandData) MarshalEasyJSON(w *jwriter.Writer) {
- easyjson1ed00e60EncodeGithubComOlivereElastic1(w, v)
-}
-
-// UnmarshalJSON supports json.Unmarshaler interface
-func (v *bulkUpdateRequestCommandData) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- easyjson1ed00e60DecodeGithubComOlivereElastic1(&r, v)
- return r.Error()
-}
-
-// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
-func (v *bulkUpdateRequestCommandData) UnmarshalEasyJSON(l *jlexer.Lexer) {
- easyjson1ed00e60DecodeGithubComOlivereElastic1(l, v)
-}
-func easyjson1ed00e60DecodeGithubComOlivereElastic2(in *jlexer.Lexer, out *bulkUpdateRequestCommand) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- in.Skip()
- } else {
- in.Delim('{')
- if !in.IsDelim('}') {
- *out = make(bulkUpdateRequestCommand)
- } else {
- *out = nil
- }
- for !in.IsDelim('}') {
- key := string(in.String())
- in.WantColon()
- var v1 bulkUpdateRequestCommandOp
- (v1).UnmarshalEasyJSON(in)
- (*out)[key] = v1
- in.WantComma()
- }
- in.Delim('}')
- }
- if isTopLevel {
- in.Consumed()
- }
-}
-func easyjson1ed00e60EncodeGithubComOlivereElastic2(out *jwriter.Writer, in bulkUpdateRequestCommand) {
- if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 {
- out.RawString(`null`)
- } else {
- out.RawByte('{')
- v2First := true
- for v2Name, v2Value := range in {
- if v2First {
- v2First = false
- } else {
- out.RawByte(',')
- }
- out.String(string(v2Name))
- out.RawByte(':')
- (v2Value).MarshalEasyJSON(out)
- }
- out.RawByte('}')
- }
-}
-
-// MarshalJSON supports json.Marshaler interface
-func (v bulkUpdateRequestCommand) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- easyjson1ed00e60EncodeGithubComOlivereElastic2(&w, v)
- return w.Buffer.BuildBytes(), w.Error
-}
-
-// MarshalEasyJSON supports easyjson.Marshaler interface
-func (v bulkUpdateRequestCommand) MarshalEasyJSON(w *jwriter.Writer) {
- easyjson1ed00e60EncodeGithubComOlivereElastic2(w, v)
-}
-
-// UnmarshalJSON supports json.Unmarshaler interface
-func (v *bulkUpdateRequestCommand) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- easyjson1ed00e60DecodeGithubComOlivereElastic2(&r, v)
- return r.Error()
-}
-
-// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
-func (v *bulkUpdateRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) {
- easyjson1ed00e60DecodeGithubComOlivereElastic2(l, v)
-}
diff --git a/vendor/github.com/olivere/elastic/bulk_update_request_test.go b/vendor/github.com/olivere/elastic/bulk_update_request_test.go
deleted file mode 100644
index 53e73bd40..000000000
--- a/vendor/github.com/olivere/elastic/bulk_update_request_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "testing"
-)
-
-func TestBulkUpdateRequestSerialization(t *testing.T) {
- tests := []struct {
- Request BulkableRequest
- Expected []string
- }{
- // #0
- {
- Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").Doc(struct {
- Counter int64 `json:"counter"`
- }{
- Counter: 42,
- }),
- Expected: []string{
- `{"update":{"_index":"index1","_type":"doc","_id":"1"}}`,
- `{"doc":{"counter":42}}`,
- },
- },
- // #1
- {
- Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").
- Routing("123").
- RetryOnConflict(3).
- DocAsUpsert(true).
- Doc(struct {
- Counter int64 `json:"counter"`
- }{
- Counter: 42,
- }),
- Expected: []string{
- `{"update":{"_index":"index1","_type":"doc","_id":"1","retry_on_conflict":3,"routing":"123"}}`,
- `{"doc":{"counter":42},"doc_as_upsert":true}`,
- },
- },
- // #2
- {
- Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").
- RetryOnConflict(3).
- Script(NewScript(`ctx._source.retweets += param1`).Lang("javascript").Param("param1", 42)).
- Upsert(struct {
- Counter int64 `json:"counter"`
- }{
- Counter: 42,
- }),
- Expected: []string{
- `{"update":{"_index":"index1","_type":"doc","_id":"1","retry_on_conflict":3}}`,
- `{"script":{"lang":"javascript","params":{"param1":42},"source":"ctx._source.retweets += param1"},"upsert":{"counter":42}}`,
- },
- },
- // #3
- {
- Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").DetectNoop(true).Doc(struct {
- Counter int64 `json:"counter"`
- }{
- Counter: 42,
- }),
- Expected: []string{
- `{"update":{"_index":"index1","_type":"doc","_id":"1"}}`,
- `{"detect_noop":true,"doc":{"counter":42}}`,
- },
- },
- // #4
- {
- Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").
- RetryOnConflict(3).
- ScriptedUpsert(true).
- Script(NewScript(`ctx._source.retweets += param1`).Lang("javascript").Param("param1", 42)).
- Upsert(struct {
- Counter int64 `json:"counter"`
- }{
- Counter: 42,
- }),
- Expected: []string{
- `{"update":{"_index":"index1","_type":"doc","_id":"1","retry_on_conflict":3}}`,
- `{"script":{"lang":"javascript","params":{"param1":42},"source":"ctx._source.retweets += param1"},"scripted_upsert":true,"upsert":{"counter":42}}`,
- },
- },
- // #5
- {
- Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("4").ReturnSource(true).Doc(struct {
- Counter int64 `json:"counter"`
- }{
- Counter: 42,
- }),
- Expected: []string{
- `{"update":{"_index":"index1","_type":"doc","_id":"4"}}`,
- `{"doc":{"counter":42},"_source":true}`,
- },
- },
- }
-
- for i, test := range tests {
- lines, err := test.Request.Source()
- if err != nil {
- t.Fatalf("#%d: expected no error, got: %v", i, err)
- }
- if lines == nil {
- t.Fatalf("#%d: expected lines, got nil", i)
- }
- if len(lines) != len(test.Expected) {
- t.Fatalf("#%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
- }
- for j, line := range lines {
- if line != test.Expected[j] {
- t.Errorf("#%d: expected line #%d to be\n%s\nbut got:\n%s", i, j, test.Expected[j], line)
- }
- }
- }
-}
-
-var bulkUpdateRequestSerializationResult string
-
-func BenchmarkBulkUpdateRequestSerialization(b *testing.B) {
- b.Run("stdlib", func(b *testing.B) {
- r := NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").Doc(struct {
- Counter int64 `json:"counter"`
- }{
- Counter: 42,
- })
- benchmarkBulkUpdateRequestSerialization(b, r.UseEasyJSON(false))
- })
- b.Run("easyjson", func(b *testing.B) {
- r := NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").Doc(struct {
- Counter int64 `json:"counter"`
- }{
- Counter: 42,
- }).UseEasyJSON(false)
- benchmarkBulkUpdateRequestSerialization(b, r.UseEasyJSON(true))
- })
-}
-
-func benchmarkBulkUpdateRequestSerialization(b *testing.B, r *BulkUpdateRequest) {
- var s string
- for n := 0; n < b.N; n++ {
- s = r.String()
- r.source = nil // Don't let caching spoil the benchmark
- }
- bulkUpdateRequestSerializationResult = s // ensure the compiler doesn't optimize
- b.ReportAllocs()
-}
diff --git a/vendor/github.com/olivere/elastic/canonicalize.go b/vendor/github.com/olivere/elastic/canonicalize.go
deleted file mode 100644
index a436f03b6..000000000
--- a/vendor/github.com/olivere/elastic/canonicalize.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "net/url"
-
-// canonicalize takes a list of URLs and returns their canonicalized form, i.e.
-// it removes anything but scheme, userinfo, host, path, and port.
-// It also removes all trailing slashes. Invalid URLs or URLs that do not
-// use protocol http or https are skipped.
-//
-// Example:
-// http://127.0.0.1:9200/?query=1 -> http://127.0.0.1:9200
-// http://127.0.0.1:9200/db1/ -> http://127.0.0.1:9200/db1
-func canonicalize(rawurls ...string) []string {
- var canonicalized []string
- for _, rawurl := range rawurls {
- u, err := url.Parse(rawurl)
- if err == nil {
- if u.Scheme == "http" || u.Scheme == "https" {
- // Trim trailing slashes
- for len(u.Path) > 0 && u.Path[len(u.Path)-1] == '/' {
- u.Path = u.Path[0 : len(u.Path)-1]
- }
- u.Fragment = ""
- u.RawQuery = ""
- canonicalized = append(canonicalized, u.String())
- }
- }
- }
- return canonicalized
-}
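
A small sketch of the behavior documented above. Since canonicalize is unexported it would have to live inside the package, as canonicalize_test.go does; the example function name and inputs are illustrative, restating the rules from the doc comment:

package elastic

import "fmt"

// Example_canonicalize: query and fragment are dropped, trailing slashes
// are trimmed, and non-http(s) URLs are skipped entirely.
func Example_canonicalize() {
	urls := canonicalize(
		"http://user:secret@127.0.0.1:9200/db1/?query=1#frag",
		"gopher://golang.org/",
	)
	fmt.Println(urls)
	// Output: [http://user:secret@127.0.0.1:9200/db1]
}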
diff --git a/vendor/github.com/olivere/elastic/canonicalize_test.go b/vendor/github.com/olivere/elastic/canonicalize_test.go
deleted file mode 100644
index 86b62d498..000000000
--- a/vendor/github.com/olivere/elastic/canonicalize_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-func TestCanonicalize(t *testing.T) {
- tests := []struct {
- Input []string
- Output []string
- }{
- // #0
- {
- Input: []string{"http://127.0.0.1/"},
- Output: []string{"http://127.0.0.1"},
- },
- // #1
- {
- Input: []string{"http://127.0.0.1:9200/", "gopher://golang.org/", "http://127.0.0.1:9201"},
- Output: []string{"http://127.0.0.1:9200", "http://127.0.0.1:9201"},
- },
- // #2
- {
- Input: []string{"http://user:secret@127.0.0.1/path?query=1#fragment"},
- Output: []string{"http://user:secret@127.0.0.1/path"},
- },
- // #3
- {
- Input: []string{"https://somewhere.on.mars:9999/path?query=1#fragment"},
- Output: []string{"https://somewhere.on.mars:9999/path"},
- },
- // #4
- {
- Input: []string{"https://prod1:9999/one?query=1#fragment", "https://prod2:9998/two?query=1#fragment"},
- Output: []string{"https://prod1:9999/one", "https://prod2:9998/two"},
- },
- // #5
- {
- Input: []string{"http://127.0.0.1/one/"},
- Output: []string{"http://127.0.0.1/one"},
- },
- // #6
- {
- Input: []string{"http://127.0.0.1/one///"},
- Output: []string{"http://127.0.0.1/one"},
- },
- // #7: Invalid URL
- {
- Input: []string{"127.0.0.1/"},
- Output: []string{},
- },
- // #8: Invalid URL
- {
- Input: []string{"127.0.0.1:9200"},
- Output: []string{},
- },
- }
-
- for i, test := range tests {
- got := canonicalize(test.Input...)
- if want, have := len(test.Output), len(got); want != have {
- t.Fatalf("#%d: expected %d elements; got: %d", i, want, have)
- }
- for i := 0; i < len(got); i++ {
- if want, have := test.Output[i], got[i]; want != have {
- t.Errorf("#%d: expected %q; got: %q", i, want, have)
- }
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/clear_scroll.go b/vendor/github.com/olivere/elastic/clear_scroll.go
deleted file mode 100644
index 4f449504c..000000000
--- a/vendor/github.com/olivere/elastic/clear_scroll.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
-)
-
-// ClearScrollService clears one or more scroll contexts by their ids.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#_clear_scroll_api
-// for details.
-type ClearScrollService struct {
- client *Client
- pretty bool
- scrollId []string
-}
-
-// NewClearScrollService creates a new ClearScrollService.
-func NewClearScrollService(client *Client) *ClearScrollService {
- return &ClearScrollService{
- client: client,
- scrollId: make([]string, 0),
- }
-}
-
-// ScrollId is a list of scroll IDs to clear.
-// Use _all to clear all search contexts.
-func (s *ClearScrollService) ScrollId(scrollIds ...string) *ClearScrollService {
- s.scrollId = append(s.scrollId, scrollIds...)
- return s
-}
-
-// Pretty indicates that the JSON response should be indented and human readable.
-func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *ClearScrollService) buildURL() (string, url.Values, error) {
- // Build URL
- path := "/_search/scroll/"
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *ClearScrollService) Validate() error {
- var invalid []string
- if len(s.scrollId) == 0 {
- invalid = append(invalid, "ScrollId")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *ClearScrollService) Do(ctx context.Context) (*ClearScrollResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- body := map[string][]string{
- "scroll_id": s.scrollId,
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "DELETE",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(ClearScrollResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// ClearScrollResponse is the response of ClearScrollService.Do.
-type ClearScrollResponse struct {
-}
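
A hedged sketch of the usual lifecycle the service above completes: iterate a scroll until io.EOF, then release the server-side search context with ClearScroll. The index name, page size, and package name are placeholders:

package bulkexample

import (
	"context"
	"io"

	"github.com/olivere/elastic"
)

// drainScroll pages through every document of an index and clears the
// scroll context afterwards. Sketch only; real code would do something
// with each hit instead of merely counting.
func drainScroll(ctx context.Context, client *elastic.Client, index string) (int64, error) {
	scroll := client.Scroll(index).Size(100)
	var total int64
	var scrollID string
	for {
		res, err := scroll.Do(ctx)
		if err == io.EOF {
			break // no more pages
		}
		if err != nil {
			return total, err
		}
		scrollID = res.ScrollId
		total += int64(len(res.Hits.Hits))
	}
	if scrollID != "" {
		// Release the search context on the server; "_all" would clear every context.
		if _, err := client.ClearScroll().ScrollId(scrollID).Do(ctx); err != nil {
			return total, err
		}
	}
	return total, nil
}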
diff --git a/vendor/github.com/olivere/elastic/clear_scroll_test.go b/vendor/github.com/olivere/elastic/clear_scroll_test.go
deleted file mode 100644
index 4037d3cd6..000000000
--- a/vendor/github.com/olivere/elastic/clear_scroll_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- _ "net/http"
- "testing"
-)
-
-func TestClearScroll(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
- // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Match all should return all documents
- res, err := client.Scroll(testIndexName).Size(1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected results != nil; got nil")
- }
- if res.ScrollId == "" {
- t.Fatalf("expected scrollId in results; got %q", res.ScrollId)
- }
-
- // Search should succeed
- _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Clear scroll id
- clearScrollRes, err := client.ClearScroll().ScrollId(res.ScrollId).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if clearScrollRes == nil {
- t.Fatal("expected results != nil; got nil")
- }
-
- // Search result should fail
- _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do(context.TODO())
- if err == nil {
- t.Fatalf("expected scroll to fail")
- }
-}
-
-func TestClearScrollValidate(t *testing.T) {
- client := setupTestClient(t)
-
- // No scroll id -> fail with error
- res, err := NewClearScrollService(client).Do(context.TODO())
- if err == nil {
- t.Fatalf("expected ClearScroll to fail without scroll ids")
- }
- if res != nil {
- t.Fatalf("expected result to be nil; got: %v", res)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/client.go b/vendor/github.com/olivere/elastic/client.go
deleted file mode 100644
index 165a30526..000000000
--- a/vendor/github.com/olivere/elastic/client.go
+++ /dev/null
@@ -1,1780 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "log"
- "net/http"
- "net/http/httputil"
- "net/url"
- "os"
- "regexp"
- "strings"
- "sync"
- "time"
-
- "github.com/pkg/errors"
-
- "github.com/olivere/elastic/config"
-)
-
-const (
- // Version is the current version of Elastic.
- Version = "6.1.7"
-
- // DefaultURL is the default endpoint of Elasticsearch on the local machine.
- // It is used e.g. when initializing a new Client without a specific URL.
- DefaultURL = "http://127.0.0.1:9200"
-
- // DefaultScheme is the default protocol scheme to use when sniffing
- // the Elasticsearch cluster.
- DefaultScheme = "http"
-
- // DefaultHealthcheckEnabled specifies if healthchecks are enabled by default.
- DefaultHealthcheckEnabled = true
-
- // DefaultHealthcheckTimeoutStartup is the time the healthcheck waits
- // for a response from Elasticsearch on startup, i.e. when creating a
- // client. After the client is started, a shorter timeout is commonly used
- // (its default is specified in DefaultHealthcheckTimeout).
- DefaultHealthcheckTimeoutStartup = 5 * time.Second
-
- // DefaultHealthcheckTimeout specifies the time a running client waits for
- // a response from Elasticsearch. Notice that the healthcheck timeout
- // when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup).
- DefaultHealthcheckTimeout = 1 * time.Second
-
- // DefaultHealthcheckInterval is the default interval between
- // two health checks of the nodes in the cluster.
- DefaultHealthcheckInterval = 60 * time.Second
-
- // DefaultSnifferEnabled specifies if the sniffer is enabled by default.
- DefaultSnifferEnabled = true
-
- // DefaultSnifferInterval is the interval between two sniffing procedures,
- // i.e. the lookup of all nodes in the cluster and their addition/removal
- // from the list of actual connections.
- DefaultSnifferInterval = 15 * time.Minute
-
- // DefaultSnifferTimeoutStartup is the default timeout for the sniffing
- // process that is initiated while creating a new client. For subsequent
- // sniffing processes, DefaultSnifferTimeout is used (by default).
- DefaultSnifferTimeoutStartup = 5 * time.Second
-
- // DefaultSnifferTimeout is the default timeout after which the
- // sniffing process times out. Notice that for the initial sniffing
- // process, DefaultSnifferTimeoutStartup is used.
- DefaultSnifferTimeout = 2 * time.Second
-
- // DefaultSendGetBodyAs is the HTTP method to use when elastic is sending
- // a GET request with a body.
- DefaultSendGetBodyAs = "GET"
-
- // off is used to disable timeouts.
- off = -1 * time.Second
-)
-
-var (
- // ErrNoClient is raised when no Elasticsearch node is available.
- ErrNoClient = errors.New("no Elasticsearch node available")
-
- // ErrRetry is raised when a request cannot be executed after the configured
- // number of retries.
- ErrRetry = errors.New("cannot connect after several retries")
-
- // ErrTimeout is raised when a request timed out, e.g. when WaitForStatus
- // didn't return in time.
- ErrTimeout = errors.New("timeout")
-
- // noRetries is a retrier that does not retry.
- noRetries = NewStopRetrier()
-)
-
-// ClientOptionFunc is a function that configures a Client.
-// It is used in NewClient.
-type ClientOptionFunc func(*Client) error
-
-// Client is an Elasticsearch client. Create one by calling NewClient.
-type Client struct {
- c *http.Client // net/http Client to use for requests
-
- connsMu sync.RWMutex // connsMu guards the next block
- conns []*conn // all connections
- cindex int // index into conns
-
- mu sync.RWMutex // guards the next block
- urls []string // set of URLs passed initially to the client
- running bool // true if the client's background processes are running
- errorlog Logger // error log for critical messages
- infolog Logger // information log for e.g. response times
- tracelog Logger // trace log for debugging
- scheme string // http or https
- healthcheckEnabled bool // healthchecks enabled or disabled
- healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup
- healthcheckTimeout time.Duration // time the healthcheck waits for a response from Elasticsearch
- healthcheckInterval time.Duration // interval between healthchecks
- healthcheckStop chan bool // notify healthchecker to stop, and notify back
- snifferEnabled bool // sniffer enabled or disabled
- snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup
- snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API
- snifferInterval time.Duration // interval between sniffing
- snifferCallback SnifferCallback // callback to modify the sniffing decision
- snifferStop chan bool // notify sniffer to stop, and notify back
- decoder Decoder // used to decode data sent from Elasticsearch
- basicAuth bool // indicates whether to send HTTP Basic Auth credentials
- basicAuthUsername string // username for HTTP Basic Auth
- basicAuthPassword string // password for HTTP Basic Auth
- sendGetBodyAs string // override for when sending a GET with a body
- requiredPlugins []string // list of required plugins
- retrier Retrier // strategy for retries
-}
-
-// NewClient creates a new client to work with Elasticsearch.
-//
-// NewClient, by default, is meant to be long-lived and shared across
-// your application. If you need a short-lived client, e.g. for request-scope,
-// consider using NewSimpleClient instead.
-//
-// The caller can configure the new client by passing configuration options
-// to the func.
-//
-// Example:
-//
-// client, err := elastic.NewClient(
-// elastic.SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"),
-// elastic.SetBasicAuth("user", "secret"))
-//
-// If no URL is configured, Elastic uses DefaultURL by default.
-//
-// If the sniffer is enabled (the default), the new client then sniffs
-// the cluster via the Nodes Info API
-// (see https://www.elastic.co/guide/en/elasticsearch/reference/6.0/cluster-nodes-info.html#cluster-nodes-info).
-// It uses the URLs specified by the caller. The caller is responsible
-// for passing only URLs of nodes that belong to the same cluster.
-// This sniffing process is run on startup and periodically.
-// Use SnifferInterval to set the interval between two sniffs (default is
-// 15 minutes). In other words: By default, the client will find new nodes
-// in the cluster and remove those that are no longer available every
-// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient.
-//
-// The list of nodes found in the sniffing process will be used to make
-// connections to the REST API of Elasticsearch. These nodes are also
-// periodically checked in a shorter time frame. This process is called
-// a health check. By default, a health check is done every 60 seconds.
-// You can set a shorter or longer interval by SetHealthcheckInterval.
-// Disabling health checks is not recommended, but can be done by
-// SetHealthcheck(false).
-//
-// Connections are automatically marked as dead or healthy while
-// making requests to Elasticsearch. When a request fails, Elastic will
-// call into the Retry strategy which can be specified with SetRetry.
-// The Retry strategy is also responsible for handling backoff i.e. the time
-// to wait before starting the next request. There are various standard
-// backoff implementations, e.g. ExponentialBackoff or SimpleBackoff.
-// Retries are disabled by default.
-//
-// If no HttpClient is configured, then http.DefaultClient is used.
-// You can use your own http.Client with some http.Transport for
-// advanced scenarios.
-//
-// An error is also returned when some configuration option is invalid or
-// the new client cannot sniff the cluster (if enabled).
-func NewClient(options ...ClientOptionFunc) (*Client, error) {
- // Set up the client
- c := &Client{
- c: http.DefaultClient,
- conns: make([]*conn, 0),
- cindex: -1,
- scheme: DefaultScheme,
- decoder: &DefaultDecoder{},
- healthcheckEnabled: DefaultHealthcheckEnabled,
- healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup,
- healthcheckTimeout: DefaultHealthcheckTimeout,
- healthcheckInterval: DefaultHealthcheckInterval,
- healthcheckStop: make(chan bool),
- snifferEnabled: DefaultSnifferEnabled,
- snifferTimeoutStartup: DefaultSnifferTimeoutStartup,
- snifferTimeout: DefaultSnifferTimeout,
- snifferInterval: DefaultSnifferInterval,
- snifferCallback: nopSnifferCallback,
- snifferStop: make(chan bool),
- sendGetBodyAs: DefaultSendGetBodyAs,
- retrier: noRetries, // no retries by default
- }
-
- // Run the options on it
- for _, option := range options {
- if err := option(c); err != nil {
- return nil, err
- }
- }
-
- // Use a default URL and normalize them
- if len(c.urls) == 0 {
- c.urls = []string{DefaultURL}
- }
- c.urls = canonicalize(c.urls...)
-
- // If the URLs have auth info, use them here as an alternative to SetBasicAuth
- if !c.basicAuth {
- for _, urlStr := range c.urls {
- u, err := url.Parse(urlStr)
- if err == nil && u.User != nil {
- c.basicAuth = true
- c.basicAuthUsername = u.User.Username()
- c.basicAuthPassword, _ = u.User.Password()
- break
- }
- }
- }
-
- // Check if we can make a request to any of the specified URLs
- if c.healthcheckEnabled {
- if err := c.startupHealthcheck(c.healthcheckTimeoutStartup); err != nil {
- return nil, err
- }
- }
-
- if c.snifferEnabled {
- // Sniff the cluster initially
- if err := c.sniff(c.snifferTimeoutStartup); err != nil {
- return nil, err
- }
- } else {
- // Do not sniff the cluster initially. Use the provided URLs instead.
- for _, url := range c.urls {
- c.conns = append(c.conns, newConn(url, url))
- }
- }
-
- if c.healthcheckEnabled {
- // Perform an initial health check
- c.healthcheck(c.healthcheckTimeoutStartup, true)
- }
- // Ensure that we have at least one connection available
- if err := c.mustActiveConn(); err != nil {
- return nil, err
- }
-
- // Check the required plugins
- for _, plugin := range c.requiredPlugins {
- found, err := c.HasPlugin(plugin)
- if err != nil {
- return nil, err
- }
- if !found {
- return nil, fmt.Errorf("elastic: plugin %s not found", plugin)
- }
- }
-
- if c.snifferEnabled {
- go c.sniffer() // periodically update cluster information
- }
- if c.healthcheckEnabled {
- go c.healthchecker() // start goroutine periodically ping all nodes of the cluster
- }
-
- c.mu.Lock()
- c.running = true
- c.mu.Unlock()
-
- return c, nil
-}
-
-// NewClientFromConfig initializes a client from a configuration.
-func NewClientFromConfig(cfg *config.Config) (*Client, error) {
- var options []ClientOptionFunc
- if cfg != nil {
- if cfg.URL != "" {
- options = append(options, SetURL(cfg.URL))
- }
- if cfg.Errorlog != "" {
- f, err := os.OpenFile(cfg.Errorlog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
- if err != nil {
- return nil, errors.Wrap(err, "unable to initialize error log")
- }
- l := log.New(f, "", 0)
- options = append(options, SetErrorLog(l))
- }
- if cfg.Tracelog != "" {
- f, err := os.OpenFile(cfg.Tracelog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
- if err != nil {
- return nil, errors.Wrap(err, "unable to initialize trace log")
- }
- l := log.New(f, "", 0)
- options = append(options, SetTraceLog(l))
- }
- if cfg.Infolog != "" {
- f, err := os.OpenFile(cfg.Infolog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
- if err != nil {
- return nil, errors.Wrap(err, "unable to initialize info log")
- }
- l := log.New(f, "", 0)
- options = append(options, SetInfoLog(l))
- }
- if cfg.Username != "" || cfg.Password != "" {
- options = append(options, SetBasicAuth(cfg.Username, cfg.Password))
- }
- if cfg.Sniff != nil {
- options = append(options, SetSniff(*cfg.Sniff))
- }
- }
- return NewClient(options...)
-}
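
// A minimal sketch of calling NewClientFromConfig. The config.Config fields
// used here (URL, Username, Password, Sniff) are the ones read above; the
// concrete values are placeholders.
func exampleNewClientFromConfig() (*Client, error) {
	sniff := false
	cfg := &config.Config{
		URL:      "http://127.0.0.1:9200",
		Username: "user",
		Password: "secret",
		Sniff:    &sniff, // pointer, so "unset" can be distinguished from false
	}
	return NewClientFromConfig(cfg)
}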
-
-// NewSimpleClient creates a new short-lived Client that can be used in
-// scenarios where you need e.g. one client per request.
-//
-// While NewClient by default sets up e.g. periodic health checks
-// and sniffing for new nodes in separate goroutines, NewSimpleClient does
-// not and is meant as a simple replacement where you don't need all the
-// heavy lifting of NewClient.
-//
-// NewSimpleClient does the following by default: First, all health checks
-// are disabled, including timeouts and periodic checks. Second, sniffing
-// is disabled, including timeouts and periodic checks. Third, retries are
-// disabled, just as with NewClient. NewSimpleClient also does not start
-// any goroutines.
-//
-// Notice that you can still override settings by passing additional options,
-// just like with NewClient.
-func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) {
- c := &Client{
- c: http.DefaultClient,
- conns: make([]*conn, 0),
- cindex: -1,
- scheme: DefaultScheme,
- decoder: &DefaultDecoder{},
- healthcheckEnabled: false,
- healthcheckTimeoutStartup: off,
- healthcheckTimeout: off,
- healthcheckInterval: off,
- healthcheckStop: make(chan bool),
- snifferEnabled: false,
- snifferTimeoutStartup: off,
- snifferTimeout: off,
- snifferInterval: off,
- snifferCallback: nopSnifferCallback,
- snifferStop: make(chan bool),
- sendGetBodyAs: DefaultSendGetBodyAs,
- retrier: noRetries, // no retries by default
- }
-
- // Run the options on it
- for _, option := range options {
- if err := option(c); err != nil {
- return nil, err
- }
- }
-
- // Use a default URL and normalize them
- if len(c.urls) == 0 {
- c.urls = []string{DefaultURL}
- }
- c.urls = canonicalize(c.urls...)
-
- // If the URLs have auth info, use them here as an alternative to SetBasicAuth
- if !c.basicAuth {
- for _, urlStr := range c.urls {
- u, err := url.Parse(urlStr)
- if err == nil && u.User != nil {
- c.basicAuth = true
- c.basicAuthUsername = u.User.Username()
- c.basicAuthPassword, _ = u.User.Password()
- break
- }
- }
- }
-
- for _, url := range c.urls {
- c.conns = append(c.conns, newConn(url, url))
- }
-
- // Ensure that we have at least one connection available
- if err := c.mustActiveConn(); err != nil {
- return nil, err
- }
-
- // Check the required plugins
- for _, plugin := range c.requiredPlugins {
- found, err := c.HasPlugin(plugin)
- if err != nil {
- return nil, err
- }
- if !found {
- return nil, fmt.Errorf("elastic: plugin %s not found", plugin)
- }
- }
-
- c.mu.Lock()
- c.running = true
- c.mu.Unlock()
-
- return c, nil
-}
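
// A minimal sketch of a request-scoped client built with NewSimpleClient:
// no sniffing, no health checks, no background goroutines. The URL is a
// placeholder.
func exampleNewSimpleClient() (*Client, error) {
	return NewSimpleClient(SetURL("http://127.0.0.1:9200"))
}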
-
-// SetHttpClient can be used to specify the http.Client to use when making
-// HTTP requests to Elasticsearch.
-func SetHttpClient(httpClient *http.Client) ClientOptionFunc {
- return func(c *Client) error {
- if httpClient != nil {
- c.c = httpClient
- } else {
- c.c = http.DefaultClient
- }
- return nil
- }
-}
-
-// SetBasicAuth can be used to specify the HTTP Basic Auth credentials to
-// use when making HTTP requests to Elasticsearch.
-func SetBasicAuth(username, password string) ClientOptionFunc {
- return func(c *Client) error {
- c.basicAuthUsername = username
- c.basicAuthPassword = password
- c.basicAuth = c.basicAuthUsername != "" || c.basicAuthPassword != ""
- return nil
- }
-}
-
-// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that
-// when sniffing is enabled, these URLs are used to initially sniff the
-// cluster on startup.
-func SetURL(urls ...string) ClientOptionFunc {
- return func(c *Client) error {
- switch len(urls) {
- case 0:
- c.urls = []string{DefaultURL}
- default:
- c.urls = urls
- }
- return nil
- }
-}
-
-// SetScheme sets the HTTP scheme to look for when sniffing (http or https).
-// This is http by default.
-func SetScheme(scheme string) ClientOptionFunc {
- return func(c *Client) error {
- c.scheme = scheme
- return nil
- }
-}
-
-// SetSniff enables or disables the sniffer (enabled by default).
-func SetSniff(enabled bool) ClientOptionFunc {
- return func(c *Client) error {
- c.snifferEnabled = enabled
- return nil
- }
-}
-
-// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used
-// when creating a new client. The default is 5 seconds. Notice that the
-// timeout being used for subsequent sniffing processes is set with
-// SetSnifferTimeout.
-func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc {
- return func(c *Client) error {
- c.snifferTimeoutStartup = timeout
- return nil
- }
-}
-
-// SetSnifferTimeout sets the timeout for the sniffer that finds the
-// nodes in a cluster. The default is 2 seconds. Notice that the timeout
-// used when creating a new client on startup is usually greater and can
-// be set with SetSnifferTimeoutStartup.
-func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc {
- return func(c *Client) error {
- c.snifferTimeout = timeout
- return nil
- }
-}
-
-// SetSnifferInterval sets the interval between two sniffing processes.
-// The default interval is 15 minutes.
-func SetSnifferInterval(interval time.Duration) ClientOptionFunc {
- return func(c *Client) error {
- c.snifferInterval = interval
- return nil
- }
-}
-
-// SnifferCallback defines the protocol for sniffing decisions.
-type SnifferCallback func(*NodesInfoNode) bool
-
-// nopSnifferCallback is the default sniffer callback: It accepts
-// all nodes the sniffer finds.
-var nopSnifferCallback = func(*NodesInfoNode) bool { return true }
-
-// SetSnifferCallback allows the caller to modify sniffer decisions.
-// When setting the callback, the given SnifferCallback is called for
-// each (healthy) node found during the sniffing process.
-// If the callback returns false, the node is ignored: No requests
-// are routed to it.
-func SetSnifferCallback(f SnifferCallback) ClientOptionFunc {
- return func(c *Client) error {
- if f != nil {
- c.snifferCallback = f
- }
- return nil
- }
-}
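
// A sketch of a SnifferCallback that accepts only nodes exposing an HTTP
// publish address; it relies on the same NodesInfoNode.HTTP fields that
// sniffNode below reads. Whether such filtering is useful depends on the
// cluster layout.
func exampleSnifferCallback() (*Client, error) {
	onlyHTTP := func(node *NodesInfoNode) bool {
		return node.HTTP != nil && len(node.HTTP.PublishAddress) > 0
	}
	return NewClient(SetSnifferCallback(onlyHTTP))
}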
-
-// SetHealthcheck enables or disables healthchecks (enabled by default).
-func SetHealthcheck(enabled bool) ClientOptionFunc {
- return func(c *Client) error {
- c.healthcheckEnabled = enabled
- return nil
- }
-}
-
-// SetHealthcheckTimeoutStartup sets the timeout for the initial health check.
-// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup).
-// Notice that timeouts for subsequent health checks can be modified with
-// SetHealthcheckTimeout.
-func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc {
- return func(c *Client) error {
- c.healthcheckTimeoutStartup = timeout
- return nil
- }
-}
-
-// SetHealthcheckTimeout sets the timeout for periodic health checks.
-// The default timeout is 1 second (see DefaultHealthcheckTimeout).
-// Notice that a different (usually larger) timeout is used for the initial
-// healthcheck, which is initiated while creating a new client.
-// The startup timeout can be modified with SetHealthcheckTimeoutStartup.
-func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc {
- return func(c *Client) error {
- c.healthcheckTimeout = timeout
- return nil
- }
-}
-
-// SetHealthcheckInterval sets the interval between two health checks.
-// The default interval is 60 seconds.
-func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc {
- return func(c *Client) error {
- c.healthcheckInterval = interval
- return nil
- }
-}
-
-// SetMaxRetries sets the maximum number of retries before giving up when
-// performing an HTTP request to Elasticsearch.
-//
-// Deprecated: Replace with a Retry implementation.
-func SetMaxRetries(maxRetries int) ClientOptionFunc {
- return func(c *Client) error {
- if maxRetries < 0 {
- return errors.New("MaxRetries must be greater than or equal to 0")
- } else if maxRetries == 0 {
- c.retrier = noRetries
- } else {
- // Create a Retrier that will wait for 100ms (+/- jitter) between requests.
- // This resembles the old behavior with maxRetries.
- ticks := make([]int, maxRetries)
- for i := 0; i < len(ticks); i++ {
- ticks[i] = 100
- }
- backoff := NewSimpleBackoff(ticks...)
- c.retrier = NewBackoffRetrier(backoff)
- }
- return nil
- }
-}
-
-// SetDecoder sets the Decoder to use when decoding data from Elasticsearch.
-// DefaultDecoder is used by default.
-func SetDecoder(decoder Decoder) ClientOptionFunc {
- return func(c *Client) error {
- if decoder != nil {
- c.decoder = decoder
- } else {
- c.decoder = &DefaultDecoder{}
- }
- return nil
- }
-}
-
-// SetRequiredPlugins can be used to indicate that some plugins are required
-// before a Client will be created.
-func SetRequiredPlugins(plugins ...string) ClientOptionFunc {
- return func(c *Client) error {
- if c.requiredPlugins == nil {
- c.requiredPlugins = make([]string, 0)
- }
- c.requiredPlugins = append(c.requiredPlugins, plugins...)
- return nil
- }
-}
-
-// SetErrorLog sets the logger for critical messages like nodes joining
-// or leaving the cluster or failing requests. It is nil by default.
-func SetErrorLog(logger Logger) ClientOptionFunc {
- return func(c *Client) error {
- c.errorlog = logger
- return nil
- }
-}
-
-// SetInfoLog sets the logger for informational messages, e.g. requests
-// and their response times. It is nil by default.
-func SetInfoLog(logger Logger) ClientOptionFunc {
- return func(c *Client) error {
- c.infolog = logger
- return nil
- }
-}
-
-// SetTraceLog specifies the log.Logger to use for output of HTTP requests
-// and responses which is helpful during debugging. It is nil by default.
-func SetTraceLog(logger Logger) ClientOptionFunc {
- return func(c *Client) error {
- c.tracelog = logger
- return nil
- }
-}
-
-// SetSendGetBodyAs specifies the HTTP method to use when sending a GET request
-// with a body. It is GET by default.
-func SetSendGetBodyAs(httpMethod string) ClientOptionFunc {
- return func(c *Client) error {
- c.sendGetBodyAs = httpMethod
- return nil
- }
-}
-
-// SetRetrier specifies the retry strategy that handles errors during
-// HTTP request/response with Elasticsearch.
-func SetRetrier(retrier Retrier) ClientOptionFunc {
- return func(c *Client) error {
- if retrier == nil {
- retrier = noRetries // no retries by default
- }
- c.retrier = retrier
- return nil
- }
-}
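
// A sketch of configuring retries via SetRetrier, combining the same
// SimpleBackoff and BackoffRetrier types that SetMaxRetries wires up
// internally. The tick values (milliseconds) are placeholders.
func exampleSetRetrier() (*Client, error) {
	retrier := NewBackoffRetrier(NewSimpleBackoff(100, 200, 400, 800))
	return NewClient(SetRetrier(retrier))
}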
-
-// String returns a string representation of the client status.
-func (c *Client) String() string {
- c.connsMu.Lock()
- conns := c.conns
- c.connsMu.Unlock()
-
- var buf bytes.Buffer
- for i, conn := range conns {
- if i > 0 {
- buf.WriteString(", ")
- }
- buf.WriteString(conn.String())
- }
- return buf.String()
-}
-
-// IsRunning returns true if the background processes of the client are
-// running, false otherwise.
-func (c *Client) IsRunning() bool {
- c.mu.RLock()
- defer c.mu.RUnlock()
- return c.running
-}
-
-// Start starts the background processes like sniffing the cluster and
-// periodic health checks. You don't need to run Start when creating a
-// client with NewClient; the background processes are run by default.
-//
-// If the background processes are already running, this is a no-op.
-func (c *Client) Start() {
- c.mu.RLock()
- if c.running {
- c.mu.RUnlock()
- return
- }
- c.mu.RUnlock()
-
- if c.snifferEnabled {
- go c.sniffer()
- }
- if c.healthcheckEnabled {
- go c.healthchecker()
- }
-
- c.mu.Lock()
- c.running = true
- c.mu.Unlock()
-
- c.infof("elastic: client started")
-}
-
-// Stop stops the background processes that the client is running,
-// i.e. sniffing the cluster periodically and running health checks
-// on the nodes.
-//
-// If the background processes are not running, this is a no-op.
-func (c *Client) Stop() {
- c.mu.RLock()
- if !c.running {
- c.mu.RUnlock()
- return
- }
- c.mu.RUnlock()
-
- if c.healthcheckEnabled {
- c.healthcheckStop <- true
- <-c.healthcheckStop
- }
-
- if c.snifferEnabled {
- c.snifferStop <- true
- <-c.snifferStop
- }
-
- c.mu.Lock()
- c.running = false
- c.mu.Unlock()
-
- c.infof("elastic: client stopped")
-}
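
// A sketch of pausing and resuming the background sniffer and health
// checker, e.g. around a maintenance window. The maintenance callback is a
// placeholder; both Stop and Start are no-ops if the background processes
// are already in the requested state.
func examplePauseBackground(c *Client, maintenance func()) {
	c.Stop()
	maintenance()
	c.Start()
}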
-
-// errorf logs to the error log.
-func (c *Client) errorf(format string, args ...interface{}) {
- if c.errorlog != nil {
- c.errorlog.Printf(format, args...)
- }
-}
-
-// infof logs informational messages.
-func (c *Client) infof(format string, args ...interface{}) {
- if c.infolog != nil {
- c.infolog.Printf(format, args...)
- }
-}
-
-// tracef logs to the trace log.
-func (c *Client) tracef(format string, args ...interface{}) {
- if c.tracelog != nil {
- c.tracelog.Printf(format, args...)
- }
-}
-
-// dumpRequest dumps the given HTTP request to the trace log.
-func (c *Client) dumpRequest(r *http.Request) {
- if c.tracelog != nil {
- out, err := httputil.DumpRequestOut(r, true)
- if err == nil {
- c.tracef("%s\n", string(out))
- }
- }
-}
-
-// dumpResponse dumps the given HTTP response to the trace log.
-func (c *Client) dumpResponse(resp *http.Response) {
- if c.tracelog != nil {
- out, err := httputil.DumpResponse(resp, true)
- if err == nil {
- c.tracef("%s\n", string(out))
- }
- }
-}
-
-// sniffer periodically runs sniff.
-func (c *Client) sniffer() {
- c.mu.RLock()
- timeout := c.snifferTimeout
- interval := c.snifferInterval
- c.mu.RUnlock()
-
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
-
- for {
- select {
- case <-c.snifferStop:
- // we are asked to stop, so we signal back that we're stopping now
- c.snifferStop <- true
- return
- case <-ticker.C:
- c.sniff(timeout)
- }
- }
-}
-
-// sniff uses the Nodes Info API to return the list of nodes in the cluster.
-// It uses the list of URLs passed on startup plus the list of URLs found
-// by the preceding sniffing process (if sniffing is enabled).
-//
-// If sniffing is disabled, this is a no-op.
-func (c *Client) sniff(timeout time.Duration) error {
- c.mu.RLock()
- if !c.snifferEnabled {
- c.mu.RUnlock()
- return nil
- }
-
- // Use all available URLs provided to sniff the cluster.
- var urls []string
- urlsMap := make(map[string]bool)
-
- // Add all URLs provided on startup
- for _, url := range c.urls {
- urlsMap[url] = true
- urls = append(urls, url)
- }
- c.mu.RUnlock()
-
- // Add all URLs found by sniffing
- c.connsMu.RLock()
- for _, conn := range c.conns {
- if !conn.IsDead() {
- url := conn.URL()
- if _, found := urlsMap[url]; !found {
- urls = append(urls, url)
- }
- }
- }
- c.connsMu.RUnlock()
-
- if len(urls) == 0 {
- return errors.Wrap(ErrNoClient, "no URLs found")
- }
-
- // Start sniffing on all found URLs
- ch := make(chan []*conn, len(urls))
-
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- defer cancel()
-
- for _, url := range urls {
- go func(url string) { ch <- c.sniffNode(ctx, url) }(url)
- }
-
- // Wait for the results to come back, or the process times out.
- for {
- select {
- case conns := <-ch:
- if len(conns) > 0 {
- c.updateConns(conns)
- return nil
- }
- case <-ctx.Done():
- // We get here if no cluster responds in time
- return errors.Wrap(ErrNoClient, "sniff timeout")
- }
- }
-}
-
-// sniffNode sniffs a single node. This method is run as a goroutine
-// in sniff. If successful, it returns the list of node URLs extracted
-// from the result of calling Nodes Info API. Otherwise, an empty array
-// is returned.
-func (c *Client) sniffNode(ctx context.Context, url string) []*conn {
- var nodes []*conn
-
- // Call the Nodes Info API at /_nodes/http
- req, err := NewRequest("GET", url+"/_nodes/http")
- if err != nil {
- return nodes
- }
-
- c.mu.RLock()
- if c.basicAuth {
- req.SetBasicAuth(c.basicAuthUsername, c.basicAuthPassword)
- }
- c.mu.RUnlock()
-
- res, err := c.c.Do((*http.Request)(req).WithContext(ctx))
- if err != nil {
- return nodes
- }
- if res == nil {
- return nodes
- }
-
- if res.Body != nil {
- defer res.Body.Close()
- }
-
- var info NodesInfoResponse
- if err := json.NewDecoder(res.Body).Decode(&info); err == nil {
- if len(info.Nodes) > 0 {
- for nodeID, node := range info.Nodes {
- if c.snifferCallback(node) {
- if node.HTTP != nil && len(node.HTTP.PublishAddress) > 0 {
- url := c.extractHostname(c.scheme, node.HTTP.PublishAddress)
- if url != "" {
- nodes = append(nodes, newConn(nodeID, url))
- }
- }
- }
- }
- }
- }
- return nodes
-}
-
-// reSniffHostAndPort is used to extract the hostname and port from a
-// Nodes Info API result (example: "inet[/127.0.0.1:9200]").
-var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`)
-
-func (c *Client) extractHostname(scheme, address string) string {
- if strings.HasPrefix(address, "inet") {
- m := reSniffHostAndPort.FindStringSubmatch(address)
- if len(m) == 3 {
- return fmt.Sprintf("%s://%s:%s", scheme, m[1], m[2])
- }
- }
- s := address
- if idx := strings.Index(s, "/"); idx >= 0 {
- s = s[idx+1:]
- }
-	if !strings.Contains(s, ":") {
- return ""
- }
- return fmt.Sprintf("%s://%s", scheme, s)
-}
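
// Illustrative inputs and outputs for extractHostname (receiver omitted),
// covering the two address formats handled above:
//
//	extractHostname("http", "inet[/127.0.0.1:9200]") // "http://127.0.0.1:9200"
//	extractHostname("https", "myhost/10.0.0.1:9200") // "https://10.0.0.1:9200"
//	extractHostname("http", "no-port-here")          // "" (no port, rejected)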
-
-// updateConns updates the client's connections with new information
-// gathered by a sniff operation.
-func (c *Client) updateConns(conns []*conn) {
- c.connsMu.Lock()
-
- // Build up new connections:
- // If we find an existing connection, use that (including no. of failures etc.).
- // If we find a new connection, add it.
- var newConns []*conn
- for _, conn := range conns {
- var found bool
- for _, oldConn := range c.conns {
- if oldConn.NodeID() == conn.NodeID() {
- // Take over the old connection
- newConns = append(newConns, oldConn)
- found = true
- break
- }
- }
- if !found {
- // New connection didn't exist, so add it to our list of new conns.
- c.infof("elastic: %s joined the cluster", conn.URL())
- newConns = append(newConns, conn)
- }
- }
-
- c.conns = newConns
- c.cindex = -1
- c.connsMu.Unlock()
-}
-
-// healthchecker periodically runs healthcheck.
-func (c *Client) healthchecker() {
- c.mu.RLock()
- timeout := c.healthcheckTimeout
- interval := c.healthcheckInterval
- c.mu.RUnlock()
-
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
-
- for {
- select {
- case <-c.healthcheckStop:
- // we are asked to stop, so we signal back that we're stopping now
- c.healthcheckStop <- true
- return
- case <-ticker.C:
- c.healthcheck(timeout, false)
- }
- }
-}
-
-// healthcheck does a health check on all nodes in the cluster. Depending on
-// the node state, it marks connections as dead, sets them alive etc.
-// If healthchecks are disabled and force is false, this is a no-op.
-// The timeout specifies how long to wait for a response from Elasticsearch.
-func (c *Client) healthcheck(timeout time.Duration, force bool) {
- c.mu.RLock()
- if !c.healthcheckEnabled && !force {
- c.mu.RUnlock()
- return
- }
- basicAuth := c.basicAuth
- basicAuthUsername := c.basicAuthUsername
- basicAuthPassword := c.basicAuthPassword
- c.mu.RUnlock()
-
- c.connsMu.RLock()
- conns := c.conns
- c.connsMu.RUnlock()
-
- for _, conn := range conns {
- // Run the HEAD request against ES with a timeout
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- defer cancel()
-
- // Goroutine executes the HTTP request, returns an error and sets status
- var status int
- errc := make(chan error, 1)
- go func(url string) {
- req, err := NewRequest("HEAD", url)
- if err != nil {
- errc <- err
- return
- }
- if basicAuth {
- req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
- }
- res, err := c.c.Do((*http.Request)(req).WithContext(ctx))
- if res != nil {
- status = res.StatusCode
- if res.Body != nil {
- res.Body.Close()
- }
- }
- errc <- err
- }(conn.URL())
-
- // Wait for the Goroutine (or its timeout)
- select {
- case <-ctx.Done(): // timeout
- c.errorf("elastic: %s is dead", conn.URL())
- conn.MarkAsDead()
- case err := <-errc:
- if err != nil {
- c.errorf("elastic: %s is dead", conn.URL())
- conn.MarkAsDead()
- break
- }
- if status >= 200 && status < 300 {
- conn.MarkAsAlive()
- } else {
- conn.MarkAsDead()
- c.errorf("elastic: %s is dead [status=%d]", conn.URL(), status)
- }
- }
- }
-}
-
-// startupHealthcheck is used at startup to check if the server is available
-// at all.
-func (c *Client) startupHealthcheck(timeout time.Duration) error {
- c.mu.Lock()
- urls := c.urls
- basicAuth := c.basicAuth
- basicAuthUsername := c.basicAuthUsername
- basicAuthPassword := c.basicAuthPassword
- c.mu.Unlock()
-
- // If we don't get a connection after "timeout", we bail.
- var lastErr error
- start := time.Now()
- for {
- // Make a copy of the HTTP client provided via options to respect
- // settings like Basic Auth or a user-specified http.Transport.
- cl := new(http.Client)
- *cl = *c.c
- cl.Timeout = timeout
- for _, url := range urls {
- req, err := http.NewRequest("HEAD", url, nil)
- if err != nil {
- return err
- }
- if basicAuth {
- req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
- }
- res, err := cl.Do(req)
- if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 {
- return nil
- } else if err != nil {
- lastErr = err
- }
- }
- time.Sleep(1 * time.Second)
-		if time.Since(start) > timeout {
- break
- }
- }
- if lastErr != nil {
- return errors.Wrapf(ErrNoClient, "health check timeout: %v", lastErr)
- }
- return errors.Wrap(ErrNoClient, "health check timeout")
-}
-
-// next returns the next available connection, or ErrNoClient.
-func (c *Client) next() (*conn, error) {
- // We do round-robin here.
- // TODO(oe) This should be a pluggable strategy, like the Selector in the official clients.
- c.connsMu.Lock()
- defer c.connsMu.Unlock()
-
- i := 0
- numConns := len(c.conns)
- for {
- i++
- if i > numConns {
- break // we visited all conns: they all seem to be dead
- }
- c.cindex++
- if c.cindex >= numConns {
- c.cindex = 0
- }
- conn := c.conns[c.cindex]
- if !conn.IsDead() {
- return conn, nil
- }
- }
-
-	// We have a deadlock here: All nodes are marked as dead.
-	// If sniffing is disabled, connections will never be marked alive again,
-	// so we resurrect them here to prevent the deadlock.
-	// They'll then be picked up in the next call to PerformRequest.
- if !c.snifferEnabled {
- c.errorf("elastic: all %d nodes marked as dead; resurrecting them to prevent deadlock", len(c.conns))
- for _, conn := range c.conns {
- conn.MarkAsAlive()
- }
- }
-
- // We tried hard, but there is no node available
- return nil, errors.Wrap(ErrNoClient, "no available connection")
-}
-
-// mustActiveConn returns nil if there is an active connection,
-// otherwise ErrNoClient is returned.
-func (c *Client) mustActiveConn() error {
- c.connsMu.Lock()
- defer c.connsMu.Unlock()
-
- for _, c := range c.conns {
- if !c.IsDead() {
- return nil
- }
- }
- return errors.Wrap(ErrNoClient, "no active connection found")
-}
-
-// -- PerformRequest --
-
-// PerformRequestOptions must be passed into PerformRequest.
-type PerformRequestOptions struct {
- Method string
- Path string
- Params url.Values
- Body interface{}
- ContentType string
- IgnoreErrors []int
- Retrier Retrier
-}
-
-// PerformRequest performs an HTTP request to Elasticsearch.
-// It returns a response (which might be nil) and an error on failure.
-//
-// Optionally, a list of HTTP error codes to ignore can be passed.
-// This is necessary for services that expect e.g. HTTP status 404 as a
-// valid outcome (Exists, IndicesExists, IndicesTypeExists).
-func (c *Client) PerformRequest(ctx context.Context, opt PerformRequestOptions) (*Response, error) {
- start := time.Now().UTC()
-
- c.mu.RLock()
- timeout := c.healthcheckTimeout
- basicAuth := c.basicAuth
- basicAuthUsername := c.basicAuthUsername
- basicAuthPassword := c.basicAuthPassword
- sendGetBodyAs := c.sendGetBodyAs
- retrier := c.retrier
- if opt.Retrier != nil {
- retrier = opt.Retrier
- }
- c.mu.RUnlock()
-
- var err error
- var conn *conn
- var req *Request
- var resp *Response
- var retried bool
- var n int
-
- // Change method if sendGetBodyAs is specified.
- if opt.Method == "GET" && opt.Body != nil && sendGetBodyAs != "GET" {
- opt.Method = sendGetBodyAs
- }
-
- for {
- pathWithParams := opt.Path
- if len(opt.Params) > 0 {
- pathWithParams += "?" + opt.Params.Encode()
- }
-
- // Get a connection
- conn, err = c.next()
- if errors.Cause(err) == ErrNoClient {
- n++
- if !retried {
-				// Force a healthcheck as all connections seem to be dead.
- c.healthcheck(timeout, false)
- }
- wait, ok, rerr := retrier.Retry(ctx, n, nil, nil, err)
- if rerr != nil {
- return nil, rerr
- }
- if !ok {
- return nil, err
- }
- retried = true
- time.Sleep(wait)
- continue // try again
- }
- if err != nil {
- c.errorf("elastic: cannot get connection from pool")
- return nil, err
- }
-
- req, err = NewRequest(opt.Method, conn.URL()+pathWithParams)
- if err != nil {
- c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(opt.Method), conn.URL()+pathWithParams, err)
- return nil, err
- }
-
- if basicAuth {
- req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
- }
- if opt.ContentType != "" {
- req.Header.Set("Content-Type", opt.ContentType)
- }
-
- // Set body
- if opt.Body != nil {
- err = req.SetBody(opt.Body)
- if err != nil {
- c.errorf("elastic: couldn't set body %+v for request: %v", opt.Body, err)
- return nil, err
- }
- }
-
- // Tracing
- c.dumpRequest((*http.Request)(req))
-
- // Get response
- res, err := c.c.Do((*http.Request)(req).WithContext(ctx))
- if err == context.Canceled || err == context.DeadlineExceeded {
- // Proceed, but don't mark the node as dead
- return nil, err
- }
- if ue, ok := err.(*url.Error); ok {
- // This happens e.g. on redirect errors, see https://golang.org/src/net/http/client_test.go#L329
- if ue.Err == context.Canceled || ue.Err == context.DeadlineExceeded {
- // Proceed, but don't mark the node as dead
- return nil, err
- }
- }
- if err != nil {
- n++
- wait, ok, rerr := retrier.Retry(ctx, n, (*http.Request)(req), res, err)
- if rerr != nil {
- c.errorf("elastic: %s is dead", conn.URL())
- conn.MarkAsDead()
- return nil, rerr
- }
- if !ok {
- c.errorf("elastic: %s is dead", conn.URL())
- conn.MarkAsDead()
- return nil, err
- }
- retried = true
- time.Sleep(wait)
- continue // try again
- }
- if res.Body != nil {
- defer res.Body.Close()
- }
-
- // Tracing
- c.dumpResponse(res)
-
- // Log deprecation warnings as errors
- if s := res.Header.Get("Warning"); s != "" {
- c.errorf(s)
- }
-
- // Check for errors
- if err := checkResponse((*http.Request)(req), res, opt.IgnoreErrors...); err != nil {
-			// The request itself went through, so there is no retry here, even
-			// though Elasticsearch returned an error status.
-			// We still try to return a response.
- resp, _ = c.newResponse(res)
- return resp, err
- }
-
- // We successfully made a request with this connection
- conn.MarkAsHealthy()
-
- resp, err = c.newResponse(res)
- if err != nil {
- return nil, err
- }
-
- break
- }
-
- duration := time.Now().UTC().Sub(start)
- c.infof("%s %s [status:%d, request:%.3fs]",
- strings.ToUpper(opt.Method),
- req.URL,
- resp.StatusCode,
- float64(int64(duration/time.Millisecond))/1000)
-
- return resp, nil
-}
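
// A sketch of a raw call through PerformRequest, using only the fields
// defined in PerformRequestOptions above. The path is a placeholder
// endpoint; IgnoreErrors makes a 404 count as a valid outcome.
func examplePerformRequest(ctx context.Context, c *Client) (*Response, error) {
	return c.PerformRequest(ctx, PerformRequestOptions{
		Method:       "GET",
		Path:         "/_cluster/health",
		IgnoreErrors: []int{404},
	})
}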
-
-// -- Document APIs --
-
-// Index a document.
-func (c *Client) Index() *IndexService {
- return NewIndexService(c)
-}
-
-// Get a document.
-func (c *Client) Get() *GetService {
- return NewGetService(c)
-}
-
-// MultiGet retrieves multiple documents in one roundtrip.
-func (c *Client) MultiGet() *MgetService {
- return NewMgetService(c)
-}
-
-// Mget retrieves multiple documents in one roundtrip.
-func (c *Client) Mget() *MgetService {
- return NewMgetService(c)
-}
-
-// Delete a document.
-func (c *Client) Delete() *DeleteService {
- return NewDeleteService(c)
-}
-
-// DeleteByQuery deletes documents as found by a query.
-func (c *Client) DeleteByQuery(indices ...string) *DeleteByQueryService {
- return NewDeleteByQueryService(c).Index(indices...)
-}
-
-// Update a document.
-func (c *Client) Update() *UpdateService {
- return NewUpdateService(c)
-}
-
-// UpdateByQuery performs an update on a set of documents.
-func (c *Client) UpdateByQuery(indices ...string) *UpdateByQueryService {
- return NewUpdateByQueryService(c).Index(indices...)
-}
-
-// Bulk is the entry point to mass insert/update/delete documents.
-func (c *Client) Bulk() *BulkService {
- return NewBulkService(c)
-}
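
// A sketch of a bulk indexing round trip. NewBulkIndexRequest and its
// Index/Type/Id/Doc builders are assumed from the bulk request types of
// this package; index name, type and documents are placeholders.
func exampleBulk(ctx context.Context, c *Client) error {
	_, err := c.Bulk().
		Add(NewBulkIndexRequest().Index("my-index").Type("doc").Id("1").Doc(map[string]interface{}{"title": "one"})).
		Add(NewBulkIndexRequest().Index("my-index").Type("doc").Id("2").Doc(map[string]interface{}{"title": "two"})).
		Do(ctx)
	return err
}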
-
-// BulkProcessor allows setting up a concurrent processor of bulk requests.
-func (c *Client) BulkProcessor() *BulkProcessorService {
- return NewBulkProcessorService(c)
-}
-
-// Reindex copies data from a source index into a destination index.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-reindex.html
-// for details on the Reindex API.
-func (c *Client) Reindex() *ReindexService {
- return NewReindexService(c)
-}
-
-// TermVectors returns information and statistics on terms in the fields
-// of a particular document.
-func (c *Client) TermVectors(index, typ string) *TermvectorsService {
- builder := NewTermvectorsService(c)
- builder = builder.Index(index).Type(typ)
- return builder
-}
-
-// MultiTermVectors returns information and statistics on terms in the fields
-// of multiple documents.
-func (c *Client) MultiTermVectors() *MultiTermvectorService {
- return NewMultiTermvectorService(c)
-}
-
-// -- Search APIs --
-
-// Search is the entry point for searches.
-func (c *Client) Search(indices ...string) *SearchService {
- return NewSearchService(c).Index(indices...)
-}
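
// A sketch of a basic search. NewMatchAllQuery and the Query/Size builders
// on the search service are assumed from elsewhere in this package; the
// index name and page size are placeholders.
func exampleSearch(ctx context.Context, c *Client) error {
	res, err := c.Search("my-index").Query(NewMatchAllQuery()).Size(10).Do(ctx)
	if err != nil {
		return err
	}
	_ = res.Hits // inspect the hits here
	return nil
}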
-
-// MultiSearch is the entry point for multi searches.
-func (c *Client) MultiSearch() *MultiSearchService {
- return NewMultiSearchService(c)
-}
-
-// Count documents.
-func (c *Client) Count(indices ...string) *CountService {
- return NewCountService(c).Index(indices...)
-}
-
-// Explain computes a score explanation for a query and a specific document.
-func (c *Client) Explain(index, typ, id string) *ExplainService {
- return NewExplainService(c).Index(index).Type(typ).Id(id)
-}
-
-// TODO Search Template
-// TODO Search Shards API
-// TODO Search Exists API
-// TODO Validate API
-
-// FieldCaps returns statistical information about fields in indices.
-func (c *Client) FieldCaps(indices ...string) *FieldCapsService {
- return NewFieldCapsService(c).Index(indices...)
-}
-
-// Exists checks if a document exists.
-func (c *Client) Exists() *ExistsService {
- return NewExistsService(c)
-}
-
-// Scroll through documents. Use this to efficiently iterate over query
-// results and return them to the caller in batches.
-func (c *Client) Scroll(indices ...string) *ScrollService {
- return NewScrollService(c).Index(indices...)
-}
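
// A sketch of iterating over a large result set with the Scroll service.
// It assumes the convention that Do returns io.EOF once the scroll is
// exhausted (and that "io" is imported); index name and page size are
// placeholders.
func exampleScroll(ctx context.Context, c *Client) error {
	svc := c.Scroll("my-index").Size(100)
	for {
		res, err := svc.Do(ctx)
		if err == io.EOF {
			return nil // all pages consumed
		}
		if err != nil {
			return err
		}
		_ = res // process res.Hits here
	}
}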
-
-// ClearScroll can be used to clear search contexts manually.
-func (c *Client) ClearScroll(scrollIds ...string) *ClearScrollService {
- return NewClearScrollService(c).ScrollId(scrollIds...)
-}
-
-// -- Indices APIs --
-
-// CreateIndex returns a service to create a new index.
-func (c *Client) CreateIndex(name string) *IndicesCreateService {
- return NewIndicesCreateService(c).Index(name)
-}
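
// A sketch of creating an index with settings supplied as a JSON body.
// BodyString is assumed to be available on the create service; index name
// and settings are placeholders.
func exampleCreateIndex(ctx context.Context, c *Client) error {
	body := `{"settings":{"number_of_shards":1}}`
	_, err := c.CreateIndex("my-index").BodyString(body).Do(ctx)
	return err
}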
-
-// DeleteIndex returns a service to delete an index.
-func (c *Client) DeleteIndex(indices ...string) *IndicesDeleteService {
- return NewIndicesDeleteService(c).Index(indices)
-}
-
-// IndexExists checks whether an index exists.
-func (c *Client) IndexExists(indices ...string) *IndicesExistsService {
- return NewIndicesExistsService(c).Index(indices)
-}
-
-// ShrinkIndex returns a service to shrink one index into another.
-func (c *Client) ShrinkIndex(source, target string) *IndicesShrinkService {
- return NewIndicesShrinkService(c).Source(source).Target(target)
-}
-
-// RolloverIndex rolls an alias over to a new index when the existing index
-// is considered to be too large or too old.
-func (c *Client) RolloverIndex(alias string) *IndicesRolloverService {
- return NewIndicesRolloverService(c).Alias(alias)
-}
-
-// TypeExists checks whether one or more types exist in one or more indices.
-func (c *Client) TypeExists() *IndicesExistsTypeService {
- return NewIndicesExistsTypeService(c)
-}
-
-// IndexStats provides statistics on different operations happening
-// in one or more indices.
-func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
- return NewIndicesStatsService(c).Index(indices...)
-}
-
-// OpenIndex opens an index.
-func (c *Client) OpenIndex(name string) *IndicesOpenService {
- return NewIndicesOpenService(c).Index(name)
-}
-
-// CloseIndex closes an index.
-func (c *Client) CloseIndex(name string) *IndicesCloseService {
- return NewIndicesCloseService(c).Index(name)
-}
-
-// IndexGet retrieves information about one or more indices.
-// IndexGet is only available for Elasticsearch 1.4 or later.
-func (c *Client) IndexGet(indices ...string) *IndicesGetService {
- return NewIndicesGetService(c).Index(indices...)
-}
-
-// IndexGetSettings retrieves settings of all, one or more indices.
-func (c *Client) IndexGetSettings(indices ...string) *IndicesGetSettingsService {
- return NewIndicesGetSettingsService(c).Index(indices...)
-}
-
-// IndexPutSettings sets settings for all, one or more indices.
-func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService {
- return NewIndicesPutSettingsService(c).Index(indices...)
-}
-
-// IndexSegments retrieves low level segment information for all, one or more indices.
-func (c *Client) IndexSegments(indices ...string) *IndicesSegmentsService {
- return NewIndicesSegmentsService(c).Index(indices...)
-}
-
-// IndexAnalyze performs the analysis process on a text and returns the
-// token breakdown of the text.
-func (c *Client) IndexAnalyze() *IndicesAnalyzeService {
- return NewIndicesAnalyzeService(c)
-}
-
-// Forcemerge optimizes one or more indices.
-// It replaces the deprecated Optimize API.
-func (c *Client) Forcemerge(indices ...string) *IndicesForcemergeService {
- return NewIndicesForcemergeService(c).Index(indices...)
-}
-
-// Refresh asks Elasticsearch to refresh one or more indices.
-func (c *Client) Refresh(indices ...string) *RefreshService {
- return NewRefreshService(c).Index(indices...)
-}
-
-// Flush asks Elasticsearch to free memory from the index and
-// flush data to disk.
-func (c *Client) Flush(indices ...string) *IndicesFlushService {
- return NewIndicesFlushService(c).Index(indices...)
-}
-
-// Alias enables the caller to add and/or remove aliases.
-func (c *Client) Alias() *AliasService {
- return NewAliasService(c)
-}
-
-// Aliases returns aliases by index name(s).
-func (c *Client) Aliases() *AliasesService {
- return NewAliasesService(c)
-}
-
-// IndexGetTemplate gets an index template.
-// Use XXXTemplate funcs to manage search templates.
-func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
- return NewIndicesGetTemplateService(c).Name(names...)
-}
-
-// IndexTemplateExists checks whether an index template exists.
-// Use XXXTemplate funcs to manage search templates.
-func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
- return NewIndicesExistsTemplateService(c).Name(name)
-}
-
-// IndexPutTemplate creates or updates an index template.
-// Use XXXTemplate funcs to manage search templates.
-func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
- return NewIndicesPutTemplateService(c).Name(name)
-}
-
-// IndexDeleteTemplate deletes an index template.
-// Use XXXTemplate funcs to manage search templates.
-func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
- return NewIndicesDeleteTemplateService(c).Name(name)
-}
-
-// GetMapping gets a mapping.
-func (c *Client) GetMapping() *IndicesGetMappingService {
- return NewIndicesGetMappingService(c)
-}
-
-// PutMapping registers a mapping.
-func (c *Client) PutMapping() *IndicesPutMappingService {
- return NewIndicesPutMappingService(c)
-}
-
-// GetFieldMapping gets mapping for fields.
-func (c *Client) GetFieldMapping() *IndicesGetFieldMappingService {
- return NewIndicesGetFieldMappingService(c)
-}
-
-// -- cat APIs --
-
-// TODO cat aliases
-// TODO cat allocation
-// TODO cat count
-// TODO cat fielddata
-// TODO cat health
-// TODO cat indices
-// TODO cat master
-// TODO cat nodes
-// TODO cat pending tasks
-// TODO cat plugins
-// TODO cat recovery
-// TODO cat thread pool
-// TODO cat shards
-// TODO cat segments
-
-// -- Ingest APIs --
-
-// IngestPutPipeline adds pipelines and updates existing pipelines in
-// the cluster.
-func (c *Client) IngestPutPipeline(id string) *IngestPutPipelineService {
- return NewIngestPutPipelineService(c).Id(id)
-}
-
-// IngestGetPipeline returns pipelines based on ID.
-func (c *Client) IngestGetPipeline(ids ...string) *IngestGetPipelineService {
- return NewIngestGetPipelineService(c).Id(ids...)
-}
-
-// IngestDeletePipeline deletes a pipeline by ID.
-func (c *Client) IngestDeletePipeline(id string) *IngestDeletePipelineService {
- return NewIngestDeletePipelineService(c).Id(id)
-}
-
-// IngestSimulatePipeline executes a specific pipeline against the set of
-// documents provided in the body of the request.
-func (c *Client) IngestSimulatePipeline() *IngestSimulatePipelineService {
- return NewIngestSimulatePipelineService(c)
-}
-
-// -- Cluster APIs --
-
-// ClusterHealth retrieves the health of the cluster.
-func (c *Client) ClusterHealth() *ClusterHealthService {
- return NewClusterHealthService(c)
-}
-
-// ClusterState retrieves the state of the cluster.
-func (c *Client) ClusterState() *ClusterStateService {
- return NewClusterStateService(c)
-}
-
-// ClusterStats retrieves cluster statistics.
-func (c *Client) ClusterStats() *ClusterStatsService {
- return NewClusterStatsService(c)
-}
-
-// NodesInfo retrieves information about one, several, or all of the cluster's nodes.
-func (c *Client) NodesInfo() *NodesInfoService {
- return NewNodesInfoService(c)
-}
-
-// NodesStats retrieves statistics for one, several, or all of the cluster's nodes.
-func (c *Client) NodesStats() *NodesStatsService {
- return NewNodesStatsService(c)
-}
-
-// TasksCancel cancels tasks running on the specified nodes.
-func (c *Client) TasksCancel() *TasksCancelService {
- return NewTasksCancelService(c)
-}
-
-// TasksList retrieves the list of tasks running on the specified nodes.
-func (c *Client) TasksList() *TasksListService {
- return NewTasksListService(c)
-}
-
-// TasksGetTask retrieves a task running on the cluster.
-func (c *Client) TasksGetTask() *TasksGetTaskService {
- return NewTasksGetTaskService(c)
-}
-
-// TODO Pending cluster tasks
-// TODO Cluster Reroute
-// TODO Cluster Update Settings
-// TODO Nodes Stats
-// TODO Nodes hot_threads
-
-// -- Snapshot and Restore --
-
-// TODO Snapshot Delete
-// TODO Snapshot Get
-// TODO Snapshot Restore
-// TODO Snapshot Status
-
-// SnapshotCreate creates a snapshot.
-func (c *Client) SnapshotCreate(repository string, snapshot string) *SnapshotCreateService {
- return NewSnapshotCreateService(c).Repository(repository).Snapshot(snapshot)
-}
-
-// SnapshotCreateRepository creates or updates a snapshot repository.
-func (c *Client) SnapshotCreateRepository(repository string) *SnapshotCreateRepositoryService {
- return NewSnapshotCreateRepositoryService(c).Repository(repository)
-}
-
-// SnapshotDeleteRepository deletes a snapshot repository.
-func (c *Client) SnapshotDeleteRepository(repositories ...string) *SnapshotDeleteRepositoryService {
- return NewSnapshotDeleteRepositoryService(c).Repository(repositories...)
-}
-
-// SnapshotGetRepository gets a snapshot repository.
-func (c *Client) SnapshotGetRepository(repositories ...string) *SnapshotGetRepositoryService {
- return NewSnapshotGetRepositoryService(c).Repository(repositories...)
-}
-
-// SnapshotVerifyRepository verifies a snapshot repository.
-func (c *Client) SnapshotVerifyRepository(repository string) *SnapshotVerifyRepositoryService {
- return NewSnapshotVerifyRepositoryService(c).Repository(repository)
-}
-
-// -- Helpers and shortcuts --
-
-// ElasticsearchVersion returns the version number of Elasticsearch
-// running on the given URL.
-func (c *Client) ElasticsearchVersion(url string) (string, error) {
- res, _, err := c.Ping(url).Do(context.Background())
- if err != nil {
- return "", err
- }
- return res.Version.Number, nil
-}
-
-// IndexNames returns the names of all indices in the cluster.
-func (c *Client) IndexNames() ([]string, error) {
- res, err := c.IndexGetSettings().Index("_all").Do(context.Background())
- if err != nil {
- return nil, err
- }
- var names []string
- for name := range res {
- names = append(names, name)
- }
- return names, nil
-}
-
-// Ping checks if a given node in a cluster exists and (optionally)
-// returns some basic information about the Elasticsearch server,
-// e.g. the Elasticsearch version number.
-//
-// Notice that you need to specify a URL here explicitly.
-func (c *Client) Ping(url string) *PingService {
- return NewPingService(c).URL(url)
-}
-
-// WaitForStatus waits for the cluster to have the given status.
-// This is a shortcut method for the ClusterHealth service.
-//
-// WaitForStatus waits at most for the specified timeout, e.g. "10s".
-// If the cluster reaches the given status within the timeout, nil is returned.
-// If the request timed out, ErrTimeout is returned.
-func (c *Client) WaitForStatus(status string, timeout string) error {
- health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do(context.Background())
- if err != nil {
- return err
- }
- if health.TimedOut {
- return ErrTimeout
- }
- return nil
-}
-
-// WaitForGreenStatus waits for the cluster to have the "green" status.
-// See WaitForStatus for more details.
-func (c *Client) WaitForGreenStatus(timeout string) error {
- return c.WaitForStatus("green", timeout)
-}
-
-// WaitForYellowStatus waits for the cluster to have the "yellow" status.
-// See WaitForStatus for more details.
-func (c *Client) WaitForYellowStatus(timeout string) error {
- return c.WaitForStatus("yellow", timeout)
-}
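
// A sketch of blocking until the cluster settles before issuing requests,
// using the WaitForGreenStatus shortcut above. The timeout string is a
// placeholder.
func exampleWaitForGreen(c *Client) error {
	// Returns ErrTimeout if the cluster did not reach "green" within 15s.
	return c.WaitForGreenStatus("15s")
}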
diff --git a/vendor/github.com/olivere/elastic/client_test.go b/vendor/github.com/olivere/elastic/client_test.go
deleted file mode 100644
index 4d0440ee0..000000000
--- a/vendor/github.com/olivere/elastic/client_test.go
+++ /dev/null
@@ -1,1319 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "log"
- "net"
- "net/http"
- "reflect"
- "regexp"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/fortytw2/leaktest"
-)
-
-func findConn(s string, slice ...*conn) (int, bool) {
- for i, t := range slice {
- if s == t.URL() {
- return i, true
- }
- }
- return -1, false
-}
-
-// -- NewClient --
-
-func TestClientDefaults(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
- if client.healthcheckEnabled != true {
- t.Errorf("expected health checks to be enabled, got: %v", client.healthcheckEnabled)
- }
- if client.healthcheckTimeoutStartup != DefaultHealthcheckTimeoutStartup {
- t.Errorf("expected health checks timeout on startup = %v, got: %v", DefaultHealthcheckTimeoutStartup, client.healthcheckTimeoutStartup)
- }
- if client.healthcheckTimeout != DefaultHealthcheckTimeout {
- t.Errorf("expected health checks timeout = %v, got: %v", DefaultHealthcheckTimeout, client.healthcheckTimeout)
- }
- if client.healthcheckInterval != DefaultHealthcheckInterval {
- t.Errorf("expected health checks interval = %v, got: %v", DefaultHealthcheckInterval, client.healthcheckInterval)
- }
- if client.snifferEnabled != true {
- t.Errorf("expected sniffing to be enabled, got: %v", client.snifferEnabled)
- }
- if client.snifferTimeoutStartup != DefaultSnifferTimeoutStartup {
- t.Errorf("expected sniffer timeout on startup = %v, got: %v", DefaultSnifferTimeoutStartup, client.snifferTimeoutStartup)
- }
- if client.snifferTimeout != DefaultSnifferTimeout {
- t.Errorf("expected sniffer timeout = %v, got: %v", DefaultSnifferTimeout, client.snifferTimeout)
- }
- if client.snifferInterval != DefaultSnifferInterval {
- t.Errorf("expected sniffer interval = %v, got: %v", DefaultSnifferInterval, client.snifferInterval)
- }
- if client.basicAuth != false {
- t.Errorf("expected no basic auth; got: %v", client.basicAuth)
- }
- if client.basicAuthUsername != "" {
- t.Errorf("expected no basic auth username; got: %q", client.basicAuthUsername)
- }
- if client.basicAuthPassword != "" {
-		t.Errorf("expected no basic auth password; got: %q", client.basicAuthPassword)
- }
- if client.sendGetBodyAs != "GET" {
- t.Errorf("expected sendGetBodyAs to be GET; got: %q", client.sendGetBodyAs)
- }
-}
-
-func TestClientWithoutURL(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
- // Two things should happen here:
- // 1. The client starts sniffing the cluster on DefaultURL
- // 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL
- if len(client.conns) == 0 {
- t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns)
- }
- if !isTravis() {
- if _, found := findConn(DefaultURL, client.conns...); !found {
- t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
- }
- }
-}
-
-func TestClientWithSingleURL(t *testing.T) {
- client, err := NewClient(SetURL("http://127.0.0.1:9200"))
- if err != nil {
- t.Fatal(err)
- }
- // Two things should happen here:
- // 1. The client starts sniffing the cluster on DefaultURL
- // 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL
- if len(client.conns) == 0 {
- t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns)
- }
- if !isTravis() {
- if _, found := findConn(DefaultURL, client.conns...); !found {
- t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
- }
- }
-}
-
-func TestClientWithMultipleURLs(t *testing.T) {
- client, err := NewClient(SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
- if err != nil {
- t.Fatal(err)
- }
- // The client should sniff both URLs, but only 127.0.0.1:9200 should return nodes.
- if len(client.conns) != 1 {
- t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns)
- }
- if !isTravis() {
- if client.conns[0].URL() != DefaultURL {
- t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
- }
- }
-}
-
-func TestClientWithBasicAuth(t *testing.T) {
- client, err := NewClient(SetBasicAuth("user", "secret"))
- if err != nil {
- t.Fatal(err)
- }
- if client.basicAuth != true {
- t.Errorf("expected basic auth; got: %v", client.basicAuth)
- }
- if got, want := client.basicAuthUsername, "user"; got != want {
- t.Errorf("expected basic auth username %q; got: %q", want, got)
- }
- if got, want := client.basicAuthPassword, "secret"; got != want {
- t.Errorf("expected basic auth password %q; got: %q", want, got)
- }
-}
-
-func TestClientWithBasicAuthInUserInfo(t *testing.T) {
- client, err := NewClient(SetURL("http://user1:secret1@localhost:9200", "http://user2:secret2@localhost:9200"))
- if err != nil {
- t.Fatal(err)
- }
- if client.basicAuth != true {
- t.Errorf("expected basic auth; got: %v", client.basicAuth)
- }
- if got, want := client.basicAuthUsername, "user1"; got != want {
- t.Errorf("expected basic auth username %q; got: %q", want, got)
- }
- if got, want := client.basicAuthPassword, "secret1"; got != want {
- t.Errorf("expected basic auth password %q; got: %q", want, got)
- }
-}
-
-func TestClientSniffSuccess(t *testing.T) {
- client, err := NewClient(SetURL("http://127.0.0.1:19200", "http://127.0.0.1:9200"))
- if err != nil {
- t.Fatal(err)
- }
- // The client should sniff both URLs, but only 127.0.0.1:9200 should return nodes.
- if len(client.conns) != 1 {
- t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns)
- }
-}
-
-func TestClientSniffFailure(t *testing.T) {
- _, err := NewClient(SetURL("http://127.0.0.1:19200", "http://127.0.0.1:19201"))
- if err == nil {
- t.Fatalf("expected cluster to fail with no nodes found")
- }
-}
-
-func TestClientSnifferCallback(t *testing.T) {
- var calls int
- cb := func(node *NodesInfoNode) bool {
- calls++
- return false
- }
- _, err := NewClient(
- SetURL("http://127.0.0.1:19200", "http://127.0.0.1:9200"),
- SetSnifferCallback(cb))
- if err == nil {
- t.Fatalf("expected cluster to fail with no nodes found")
- }
- if calls != 1 {
- t.Fatalf("expected 1 call to the sniffer callback, got %d", calls)
- }
-}
-
-func TestClientSniffDisabled(t *testing.T) {
- client, err := NewClient(SetSniff(false), SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
- if err != nil {
- t.Fatal(err)
- }
- // The client should not sniff, so it should have two connections.
- if len(client.conns) != 2 {
- t.Fatalf("expected 2 nodes, got: %d (%v)", len(client.conns), client.conns)
- }
- // Make two requests, so that both connections are being used
- for i := 0; i < len(client.conns); i++ {
- client.Flush().Do(context.TODO())
- }
- // The first connection (127.0.0.1:9200) should now be okay.
- if i, found := findConn("http://127.0.0.1:9200", client.conns...); !found {
- t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9200")
- } else {
- if conn := client.conns[i]; conn.IsDead() {
- t.Fatal("expected connection to be alive, but it is dead")
- }
- }
- // The second connection (127.0.0.1:9201) should now be marked as dead.
- if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
- t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
- } else {
- if conn := client.conns[i]; !conn.IsDead() {
- t.Fatal("expected connection to be dead, but it is alive")
- }
- }
-}
-
-func TestClientWillMarkConnectionsAsAliveWhenAllAreDead(t *testing.T) {
- client, err := NewClient(SetURL("http://127.0.0.1:9201"),
- SetSniff(false), SetHealthcheck(false), SetMaxRetries(0))
- if err != nil {
- t.Fatal(err)
- }
- // We should have a connection.
- if len(client.conns) != 1 {
- t.Fatalf("expected 1 node, got: %d (%v)", len(client.conns), client.conns)
- }
-
- // Make a request, so that the connections is marked as dead.
- client.Flush().Do(context.TODO())
-
- // The connection should now be marked as dead.
- if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
- t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
- } else {
- if conn := client.conns[i]; !conn.IsDead() {
- t.Fatalf("expected connection to be dead, got: %v", conn)
- }
- }
-
- // Now send another request and the connection should be marked as alive again.
- client.Flush().Do(context.TODO())
-
- if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
- t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
- } else {
- if conn := client.conns[i]; conn.IsDead() {
- t.Fatalf("expected connection to be alive, got: %v", conn)
- }
- }
-}
-
-func TestClientWithRequiredPlugins(t *testing.T) {
- _, err := NewClient(SetRequiredPlugins("no-such-plugin"))
- if err == nil {
- t.Fatal("expected error when creating client")
- }
- if got, want := err.Error(), "elastic: plugin no-such-plugin not found"; got != want {
- t.Fatalf("expected error %q; got: %q", want, got)
- }
-}
-
-func TestClientHealthcheckStartupTimeout(t *testing.T) {
- start := time.Now()
- _, err := NewClient(SetURL("http://localhost:9299"), SetHealthcheckTimeoutStartup(5*time.Second))
-	duration := time.Since(start)
- if !IsConnErr(err) {
- t.Fatal(err)
- }
- if !strings.Contains(err.Error(), "connection refused") {
- t.Fatalf("expected error to contain %q, have %q", "connection refused", err.Error())
- }
- if duration < 5*time.Second {
- t.Fatalf("expected a timeout in more than 5 seconds; got: %v", duration)
- }
-}
-
-func TestClientHealthcheckTimeoutLeak(t *testing.T) {
-	// This test checks whether healthcheck requests are canceled
-	// after the timeout.
-	// It contains a couple of hacks which won't be needed once we
-	// stop supporting Go 1.7.
-	// On Go 1.7 it uses server-side effects to monitor whether the
-	// connection was closed; on Go 1.8+ we additionally monitor
-	// goroutine leaks via leaktest.
- mux := http.NewServeMux()
-
- var reqDoneMu sync.Mutex
- var reqDone bool
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- cn, ok := w.(http.CloseNotifier)
- if !ok {
- t.Fatalf("Writer is not CloseNotifier, but %v", reflect.TypeOf(w).Name())
- }
- <-cn.CloseNotify()
- reqDoneMu.Lock()
- reqDone = true
- reqDoneMu.Unlock()
- })
-
- lis, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("Couldn't setup listener: %v", err)
- }
- addr := lis.Addr().String()
-
- srv := &http.Server{
- Handler: mux,
- }
- go srv.Serve(lis)
-
- cli := &Client{
- c: &http.Client{},
- conns: []*conn{
- &conn{
- url: "http://" + addr + "/",
- },
- },
- }
-
- type closer interface {
- Shutdown(context.Context) error
- }
-
- // pre-Go1.8 Server can't Shutdown
- cl, isServerCloseable := (interface{}(srv)).(closer)
-
-	// Since Go 1.7 can't Shutdown(), there will be a goroutine leak from the server.
-	// Monitor leaks on Go 1.8+ only.
- if isServerCloseable {
- defer leaktest.CheckTimeout(t, time.Second*10)()
- }
-
- cli.healthcheck(time.Millisecond*500, true)
-
- if isServerCloseable {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- defer cancel()
- cl.Shutdown(ctx)
- }
-
- <-time.After(time.Second)
- reqDoneMu.Lock()
- if !reqDone {
- reqDoneMu.Unlock()
- t.Fatal("Request wasn't canceled or stopped")
- }
- reqDoneMu.Unlock()
-}
-
-// -- NewSimpleClient --
-
-func TestSimpleClientDefaults(t *testing.T) {
- client, err := NewSimpleClient()
- if err != nil {
- t.Fatal(err)
- }
- if client.healthcheckEnabled != false {
- t.Errorf("expected health checks to be disabled, got: %v", client.healthcheckEnabled)
- }
- if client.healthcheckTimeoutStartup != off {
- t.Errorf("expected health checks timeout on startup = %v, got: %v", off, client.healthcheckTimeoutStartup)
- }
- if client.healthcheckTimeout != off {
- t.Errorf("expected health checks timeout = %v, got: %v", off, client.healthcheckTimeout)
- }
- if client.healthcheckInterval != off {
- t.Errorf("expected health checks interval = %v, got: %v", off, client.healthcheckInterval)
- }
- if client.snifferEnabled != false {
- t.Errorf("expected sniffing to be disabled, got: %v", client.snifferEnabled)
- }
- if client.snifferTimeoutStartup != off {
- t.Errorf("expected sniffer timeout on startup = %v, got: %v", off, client.snifferTimeoutStartup)
- }
- if client.snifferTimeout != off {
- t.Errorf("expected sniffer timeout = %v, got: %v", off, client.snifferTimeout)
- }
- if client.snifferInterval != off {
- t.Errorf("expected sniffer interval = %v, got: %v", off, client.snifferInterval)
- }
- if client.basicAuth != false {
- t.Errorf("expected no basic auth; got: %v", client.basicAuth)
- }
- if client.basicAuthUsername != "" {
- t.Errorf("expected no basic auth username; got: %q", client.basicAuthUsername)
- }
- if client.basicAuthPassword != "" {
- t.Errorf("expected no basic auth password; got: %q", client.basicAuthUsername)
- }
- if client.sendGetBodyAs != "GET" {
- t.Errorf("expected sendGetBodyAs to be GET; got: %q", client.sendGetBodyAs)
- }
-}
-
-// -- Start and stop --
-
-func TestClientStartAndStop(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
-
- running := client.IsRunning()
- if !running {
- t.Fatalf("expected background processes to run; got: %v", running)
- }
-
- // Stop
- client.Stop()
- running = client.IsRunning()
- if running {
- t.Fatalf("expected background processes to be stopped; got: %v", running)
- }
-
- // Stop again => no-op
- client.Stop()
- running = client.IsRunning()
- if running {
- t.Fatalf("expected background processes to be stopped; got: %v", running)
- }
-
- // Start
- client.Start()
- running = client.IsRunning()
- if !running {
- t.Fatalf("expected background processes to run; got: %v", running)
- }
-
- // Start again => no-op
- client.Start()
- running = client.IsRunning()
- if !running {
- t.Fatalf("expected background processes to run; got: %v", running)
- }
-}
-
-func TestClientStartAndStopWithSnifferAndHealthchecksDisabled(t *testing.T) {
- client, err := NewClient(SetSniff(false), SetHealthcheck(false))
- if err != nil {
- t.Fatal(err)
- }
-
- running := client.IsRunning()
- if !running {
- t.Fatalf("expected background processes to run; got: %v", running)
- }
-
- // Stop
- client.Stop()
- running = client.IsRunning()
- if running {
- t.Fatalf("expected background processes to be stopped; got: %v", running)
- }
-
- // Stop again => no-op
- client.Stop()
- running = client.IsRunning()
- if running {
- t.Fatalf("expected background processes to be stopped; got: %v", running)
- }
-
- // Start
- client.Start()
- running = client.IsRunning()
- if !running {
- t.Fatalf("expected background processes to run; got: %v", running)
- }
-
- // Start again => no-op
- client.Start()
- running = client.IsRunning()
- if !running {
- t.Fatalf("expected background processes to run; got: %v", running)
- }
-}
-
-// -- Sniffing --
-
-func TestClientSniffNode(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
-
- ch := make(chan []*conn)
- go func() { ch <- client.sniffNode(context.Background(), DefaultURL) }()
-
- select {
- case nodes := <-ch:
- if len(nodes) != 1 {
- t.Fatalf("expected %d nodes; got: %d", 1, len(nodes))
- }
- pattern := `http:\/\/[\d\.]+:9200`
- matched, err := regexp.MatchString(pattern, nodes[0].URL())
- if err != nil {
- t.Fatal(err)
- }
- if !matched {
- t.Fatalf("expected node URL pattern %q; got: %q", pattern, nodes[0].URL())
- }
- case <-time.After(2 * time.Second):
- t.Fatal("expected no timeout in sniff node")
- }
-}
-
-func TestClientSniffOnDefaultURL(t *testing.T) {
- client, _ := NewClient()
- if client == nil {
- t.Fatal("no client returned")
- }
-
- ch := make(chan error, 1)
- go func() {
- ch <- client.sniff(DefaultSnifferTimeoutStartup)
- }()
-
- select {
- case err := <-ch:
- if err != nil {
- t.Fatalf("expected sniff to succeed; got: %v", err)
- }
- if len(client.conns) != 1 {
- t.Fatalf("expected %d nodes; got: %d", 1, len(client.conns))
- }
- pattern := `http:\/\/[\d\.]+:9200`
- matched, err := regexp.MatchString(pattern, client.conns[0].URL())
- if err != nil {
- t.Fatal(err)
- }
- if !matched {
- t.Fatalf("expected node URL pattern %q; got: %q", pattern, client.conns[0].URL())
- }
- case <-time.After(2 * time.Second):
- t.Fatal("expected no timeout in sniff")
- }
-}
-
-func TestClientSniffTimeoutLeak(t *testing.T) {
- // This test checks that sniff requests are canceled once the
- // timeout expires.
- // It contains a couple of hacks that won't be needed once we stop
- // supporting Go 1.7: on Go 1.7 it relies on server-side effects to
- // detect that the connection was closed, while on Go 1.8+ we
- // additionally monitor goroutine leaks via leaktest.
- mux := http.NewServeMux()
-
- var reqDoneMu sync.Mutex
- var reqDone bool
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- cn, ok := w.(http.CloseNotifier)
- if !ok {
- t.Fatalf("Writer is not CloseNotifier, but %v", reflect.TypeOf(w).Name())
- }
- <-cn.CloseNotify()
- reqDoneMu.Lock()
- reqDone = true
- reqDoneMu.Unlock()
- })
-
- lis, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("Couldn't setup listener: %v", err)
- }
- addr := lis.Addr().String()
-
- srv := &http.Server{
- Handler: mux,
- }
- go srv.Serve(lis)
-
- cli := &Client{
- c: &http.Client{},
- conns: []*conn{
- &conn{
- url: "http://" + addr + "/",
- },
- },
- snifferEnabled: true,
- }
-
- type closer interface {
- Shutdown(context.Context) error
- }
-
- // pre-Go1.8 Server can't Shutdown
- cl, isServerCloseable := (interface{}(srv)).(closer)
-
- // Since Go1.7 can't Shutdown() - there will be leak from server
- // Monitor leaks on Go 1.8+
- if isServerCloseable {
- defer leaktest.CheckTimeout(t, time.Second*10)()
- }
-
- cli.sniff(time.Millisecond * 500)
-
- if isServerCloseable {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- defer cancel()
- cl.Shutdown(ctx)
- }
-
- <-time.After(time.Second)
- reqDoneMu.Lock()
- if !reqDone {
- reqDoneMu.Unlock()
- t.Fatal("Request wasn't canceled or stopped")
- }
- reqDoneMu.Unlock()
-}
-
-func TestClientExtractHostname(t *testing.T) {
- tests := []struct {
- Scheme string
- Address string
- Output string
- }{
- {
- Scheme: "http",
- Address: "",
- Output: "",
- },
- {
- Scheme: "https",
- Address: "abc",
- Output: "",
- },
- {
- Scheme: "http",
- Address: "127.0.0.1:19200",
- Output: "http://127.0.0.1:19200",
- },
- {
- Scheme: "https",
- Address: "127.0.0.1:9200",
- Output: "https://127.0.0.1:9200",
- },
- {
- Scheme: "http",
- Address: "myelk.local/10.1.0.24:9200",
- Output: "http://10.1.0.24:9200",
- },
- }
-
- client, err := NewClient(SetSniff(false), SetHealthcheck(false))
- if err != nil {
- t.Fatal(err)
- }
- for _, test := range tests {
- got := client.extractHostname(test.Scheme, test.Address)
- if want := test.Output; want != got {
- t.Errorf("expected %q; got: %q", want, got)
- }
- }
-}
-
-// -- Selector --
-
-func TestClientSelectConnHealthy(t *testing.T) {
- client, err := NewClient(
- SetSniff(false),
- SetHealthcheck(false),
- SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
- if err != nil {
- t.Fatal(err)
- }
-
- // Both are healthy, so we should get both URLs in round-robin
- client.conns[0].MarkAsHealthy()
- client.conns[1].MarkAsHealthy()
-
- // #1: Return 1st
- c, err := client.next()
- if err != nil {
- t.Fatal(err)
- }
- if c.URL() != client.conns[0].URL() {
- t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
- }
- // #2: Return 2nd
- c, err = client.next()
- if err != nil {
- t.Fatal(err)
- }
- if c.URL() != client.conns[1].URL() {
- t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
- }
- // #3: Return 1st
- c, err = client.next()
- if err != nil {
- t.Fatal(err)
- }
- if c.URL() != client.conns[0].URL() {
- t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
- }
-}
-
-func TestClientSelectConnHealthyAndDead(t *testing.T) {
- client, err := NewClient(
- SetSniff(false),
- SetHealthcheck(false),
- SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
- if err != nil {
- t.Fatal(err)
- }
-
- // 1st is healthy, second is dead
- client.conns[0].MarkAsHealthy()
- client.conns[1].MarkAsDead()
-
- // #1: Return 1st
- c, err := client.next()
- if err != nil {
- t.Fatal(err)
- }
- if c.URL() != client.conns[0].URL() {
- t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
- }
- // #2: Return 1st again
- c, err = client.next()
- if err != nil {
- t.Fatal(err)
- }
- if c.URL() != client.conns[0].URL() {
- t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
- }
- // #3: Return 1st again and again
- c, err = client.next()
- if err != nil {
- t.Fatal(err)
- }
- if c.URL() != client.conns[0].URL() {
- t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
- }
-}
-
-func TestClientSelectConnDeadAndHealthy(t *testing.T) {
- client, err := NewClient(
- SetSniff(false),
- SetHealthcheck(false),
- SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
- if err != nil {
- t.Fatal(err)
- }
-
- // 1st is dead, 2nd is healthy
- client.conns[0].MarkAsDead()
- client.conns[1].MarkAsHealthy()
-
- // #1: Return 2nd
- c, err := client.next()
- if err != nil {
- t.Fatal(err)
- }
- if c.URL() != client.conns[1].URL() {
- t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
- }
- // #2: Return 2nd again
- c, err = client.next()
- if err != nil {
- t.Fatal(err)
- }
- if c.URL() != client.conns[1].URL() {
- t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
- }
- // #3: Return 2nd again and again
- c, err = client.next()
- if err != nil {
- t.Fatal(err)
- }
- if c.URL() != client.conns[1].URL() {
- t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
- }
-}
-
-func TestClientSelectConnAllDead(t *testing.T) {
- client, err := NewClient(
- SetSniff(false),
- SetHealthcheck(false),
- SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
- if err != nil {
- t.Fatal(err)
- }
-
- // Both are dead
- client.conns[0].MarkAsDead()
- client.conns[1].MarkAsDead()
-
- // If all connections are dead, next should make them alive again, but
- // still return an error when it first finds out.
- c, err := client.next()
- if !IsConnErr(err) {
- t.Fatal(err)
- }
- if c != nil {
- t.Fatalf("expected no connection; got: %v", c)
- }
- // Return a connection
- c, err = client.next()
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if c == nil {
- t.Fatalf("expected connection; got: %v", c)
- }
- // Return a connection
- c, err = client.next()
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if c == nil {
- t.Fatalf("expected connection; got: %v", c)
- }
-}
-
-// -- ElasticsearchVersion --
-
-func TestElasticsearchVersion(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
- version, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
- if version == "" {
- t.Errorf("expected a version number, got: %q", version)
- }
-}
-
-// -- IndexNames --
-
-func TestIndexNames(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
- names, err := client.IndexNames()
- if err != nil {
- t.Fatal(err)
- }
- if len(names) == 0 {
- t.Fatalf("expected some index names, got: %d", len(names))
- }
- var found bool
- for _, name := range names {
- if name == testIndexName {
- found = true
- break
- }
- }
- if !found {
- t.Fatalf("expected to find index %q; got: %v", testIndexName, found)
- }
-}
-
-// -- PerformRequest --
-
-func TestPerformRequest(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
- res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/",
- })
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected response to be != nil")
- }
-
- ret := new(PingResult)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- t.Fatalf("expected no error on decode; got: %v", err)
- }
- if ret.ClusterName == "" {
- t.Errorf("expected cluster name; got: %q", ret.ClusterName)
- }
-}
-
-func TestPerformRequestWithSimpleClient(t *testing.T) {
- client, err := NewSimpleClient()
- if err != nil {
- t.Fatal(err)
- }
- res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/",
- })
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected response to be != nil")
- }
-
- ret := new(PingResult)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- t.Fatalf("expected no error on decode; got: %v", err)
- }
- if ret.ClusterName == "" {
- t.Errorf("expected cluster name; got: %q", ret.ClusterName)
- }
-}
-
-func TestPerformRequestWithLogger(t *testing.T) {
- var w bytes.Buffer
- out := log.New(&w, "LOGGER ", log.LstdFlags)
-
- client, err := NewClient(SetInfoLog(out), SetSniff(false))
- if err != nil {
- t.Fatal(err)
- }
-
- res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/",
- })
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected response to be != nil")
- }
-
- ret := new(PingResult)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- t.Fatalf("expected no error on decode; got: %v", err)
- }
- if ret.ClusterName == "" {
- t.Errorf("expected cluster name; got: %q", ret.ClusterName)
- }
-
- got := w.String()
- pattern := `^LOGGER \d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n`
- matched, err := regexp.MatchString(pattern, got)
- if err != nil {
- t.Fatalf("expected log line to match %q; got: %v", pattern, err)
- }
- if !matched {
- t.Errorf("expected log line to match %q; got: %v", pattern, got)
- }
-}
-
-func TestPerformRequestWithLoggerAndTracer(t *testing.T) {
- var lw bytes.Buffer
- lout := log.New(&lw, "LOGGER ", log.LstdFlags)
-
- var tw bytes.Buffer
- tout := log.New(&tw, "TRACER ", log.LstdFlags)
-
- client, err := NewClient(SetInfoLog(lout), SetTraceLog(tout), SetSniff(false))
- if err != nil {
- t.Fatal(err)
- }
-
- res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/",
- })
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected response to be != nil")
- }
-
- ret := new(PingResult)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- t.Fatalf("expected no error on decode; got: %v", err)
- }
- if ret.ClusterName == "" {
- t.Errorf("expected cluster name; got: %q", ret.ClusterName)
- }
-
- lgot := lw.String()
- if lgot == "" {
- t.Errorf("expected logger output; got: %q", lgot)
- }
-
- tgot := tw.String()
- if tgot == "" {
- t.Errorf("expected tracer output; got: %q", tgot)
- }
-}
-
-func TestPerformRequestWithTracerOnError(t *testing.T) {
- var tw bytes.Buffer
- tout := log.New(&tw, "TRACER ", log.LstdFlags)
-
- client, err := NewClient(SetTraceLog(tout), SetSniff(false))
- if err != nil {
- t.Fatal(err)
- }
-
- client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/no-such-index",
- })
-
- tgot := tw.String()
- if tgot == "" {
- t.Errorf("expected tracer output; got: %q", tgot)
- }
-}
-
-type customLogger struct {
- out bytes.Buffer
-}
-
-func (l *customLogger) Printf(format string, v ...interface{}) {
- l.out.WriteString(fmt.Sprintf(format, v...) + "\n")
-}
-
-func TestPerformRequestWithCustomLogger(t *testing.T) {
- logger := &customLogger{}
-
- client, err := NewClient(SetInfoLog(logger), SetSniff(false))
- if err != nil {
- t.Fatal(err)
- }
-
- res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/",
- })
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected response to be != nil")
- }
-
- ret := new(PingResult)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- t.Fatalf("expected no error on decode; got: %v", err)
- }
- if ret.ClusterName == "" {
- t.Errorf("expected cluster name; got: %q", ret.ClusterName)
- }
-
- got := logger.out.String()
- pattern := `^GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n`
- matched, err := regexp.MatchString(pattern, got)
- if err != nil {
- t.Fatalf("expected log line to match %q; got: %v", pattern, err)
- }
- if !matched {
- t.Errorf("expected log line to match %q; got: %v", pattern, got)
- }
-}
-
-// failingTransport will run a fail callback if it sees a given URL path prefix.
-type failingTransport struct {
- path string // path prefix to look for
- fail func(*http.Request) (*http.Response, error) // call when path prefix is found
- next http.RoundTripper // next round-tripper (use http.DefaultTransport if nil)
-}
-
-// RoundTrip implements a failing transport.
-func (tr *failingTransport) RoundTrip(r *http.Request) (*http.Response, error) {
- if strings.HasPrefix(r.URL.Path, tr.path) && tr.fail != nil {
- return tr.fail(r)
- }
- if tr.next != nil {
- return tr.next.RoundTrip(r)
- }
- return http.DefaultTransport.RoundTrip(r)
-}
-
-func TestPerformRequestRetryOnHttpError(t *testing.T) {
- var numFailedReqs int
- fail := func(r *http.Request) (*http.Response, error) {
- numFailedReqs += 1
- //return &http.Response{Request: r, StatusCode: 400}, nil
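- // Returning a transport-level error (rather than a valid HTTP
- // response) is what makes PerformRequest retry here.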
- return nil, errors.New("request failed")
- }
-
- // Run against a failing endpoint and see if PerformRequest
- // retries correctly.
- tr := &failingTransport{path: "/fail", fail: fail}
- httpClient := &http.Client{Transport: tr}
-
- client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5), SetHealthcheck(false))
- if err != nil {
- t.Fatal(err)
- }
-
- res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/fail",
- })
- if err == nil {
- t.Fatal("expected error")
- }
- if res != nil {
- t.Fatal("expected no response")
- }
- // With SetMaxRetries(5), PerformRequest should have made 5 failed attempts in total before giving up.
- if numFailedReqs != 5 {
- t.Errorf("expected %d failed requests; got: %d", 5, numFailedReqs)
- }
-}
-
-func TestPerformRequestNoRetryOnValidButUnsuccessfulHttpStatus(t *testing.T) {
- var numFailedReqs int
- fail := func(r *http.Request) (*http.Response, error) {
- numFailedReqs += 1
- return &http.Response{Request: r, StatusCode: 500}, nil
- }
-
- // Run against a failing endpoint and see if PerformRequest
- // retries correctly.
- tr := &failingTransport{path: "/fail", fail: fail}
- httpClient := &http.Client{Transport: tr}
-
- client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5), SetHealthcheck(false))
- if err != nil {
- t.Fatal(err)
- }
-
- res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/fail",
- })
- if err == nil {
- t.Fatal("expected error")
- }
- if res == nil {
- t.Fatal("expected response, got nil")
- }
- if want, got := 500, res.StatusCode; want != got {
- t.Fatalf("expected status code = %d, got %d", want, got)
- }
- // Retry should not have been triggered, because the server returned a valid (albeit unsuccessful) HTTP response.
- if numFailedReqs != 1 {
- t.Errorf("expected %d failed requests; got: %d", 1, numFailedReqs)
- }
-}
-
-// failingBody will return an error when json.Marshal is called on it.
-type failingBody struct{}
-
-// MarshalJSON implements the json.Marshaler interface and always returns an error.
-func (fb failingBody) MarshalJSON() ([]byte, error) {
- return nil, errors.New("failing to marshal")
-}
-
-func TestPerformRequestWithSetBodyError(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
- res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/",
- Body: failingBody{},
- })
- if err == nil {
- t.Fatal("expected error")
- }
- if res != nil {
- t.Fatal("expected no response")
- }
-}
-
-// sleepingTransport will sleep before doing a request.
-type sleepingTransport struct {
- timeout time.Duration
-}
-
-// RoundTrip implements a "sleepy" transport.
-func (tr *sleepingTransport) RoundTrip(r *http.Request) (*http.Response, error) {
- time.Sleep(tr.timeout)
- return http.DefaultTransport.RoundTrip(r)
-}
-
-func TestPerformRequestWithCancel(t *testing.T) {
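- // The transport sleeps for 3 seconds per request, longer than the
- // 1-second timer below, so the request is still in flight when the
- // context gets canceled.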
- tr := &sleepingTransport{timeout: 3 * time.Second}
- httpClient := &http.Client{Transport: tr}
-
- client, err := NewSimpleClient(SetHttpClient(httpClient), SetMaxRetries(0))
- if err != nil {
- t.Fatal(err)
- }
-
- type result struct {
- res *Response
- err error
- }
- ctx, cancel := context.WithCancel(context.Background())
-
- resc := make(chan result, 1)
- go func() {
- res, err := client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: "/",
- })
- resc <- result{res: res, err: err}
- }()
- select {
- case <-time.After(1 * time.Second):
- cancel()
- case res := <-resc:
- t.Fatalf("expected response before cancel, got %v", res)
- case <-ctx.Done():
- t.Fatalf("expected no early termination, got ctx.Done(): %v", ctx.Err())
- }
- err = ctx.Err()
- if err != context.Canceled {
- t.Fatalf("expected error context.Canceled, got: %v", err)
- }
-}
-
-func TestPerformRequestWithTimeout(t *testing.T) {
- tr := &sleepingTransport{timeout: 3 * time.Second}
- httpClient := &http.Client{Transport: tr}
-
- client, err := NewSimpleClient(SetHttpClient(httpClient), SetMaxRetries(0))
- if err != nil {
- t.Fatal(err)
- }
-
- type result struct {
- res *Response
- err error
- }
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
- defer cancel()
-
- resc := make(chan result, 1)
- go func() {
- res, err := client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: "/",
- })
- resc <- result{res: res, err: err}
- }()
- select {
- case res := <-resc:
- t.Fatalf("expected timeout before response, got %v", res)
- case <-ctx.Done():
- err := ctx.Err()
- if err != context.DeadlineExceeded {
- t.Fatalf("expected error context.DeadlineExceeded, got: %v", err)
- }
- }
-}
-
-// -- Compression --
-
-// Notice that the trace log always prints "Accept-Encoding: gzip"
-// regardless of whether compression is enabled or not. This is because
-// of the underlying "httputil.DumpRequestOut".
-//
-// Use a real HTTP proxy/recorder to convince yourself that
-// "Accept-Encoding: gzip" is NOT sent when DisableCompression
-// is set to true.
-//
-// See also:
-// https://groups.google.com/forum/#!topic/golang-nuts/ms8QNCzew8Q
-
-func TestPerformRequestWithCompressionEnabled(t *testing.T) {
- testPerformRequestWithCompression(t, &http.Client{
- Transport: &http.Transport{
- DisableCompression: false,
- },
- })
-}
-
-func TestPerformRequestWithCompressionDisabled(t *testing.T) {
- testPerformRequestWithCompression(t, &http.Client{
- Transport: &http.Transport{
- DisableCompression: true,
- },
- })
-}
-
-func testPerformRequestWithCompression(t *testing.T, hc *http.Client) {
- client, err := NewClient(SetHttpClient(hc), SetSniff(false))
- if err != nil {
- t.Fatal(err)
- }
- res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/",
- })
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected response to be != nil")
- }
-
- ret := new(PingResult)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- t.Fatalf("expected no error on decode; got: %v", err)
- }
- if ret.ClusterName == "" {
- t.Errorf("expected cluster name; got: %q", ret.ClusterName)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/cluster-test/Makefile b/vendor/github.com/olivere/elastic/cluster-test/Makefile
deleted file mode 100644
index cc6261db5..000000000
--- a/vendor/github.com/olivere/elastic/cluster-test/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-.PHONY: build run-omega-cluster-test
-
-default: build
-
-build:
- go build cluster-test.go
-
-run-omega-cluster-test:
- go run -race cluster-test.go \
- -nodes=http://192.168.2.65:8200,http://192.168.2.64:8200 \
- -n=5 \
- -retries=5 \
- -sniff=true -sniffer=10s \
- -healthcheck=true -healthchecker=5s \
- -errorlog=errors.log
-
diff --git a/vendor/github.com/olivere/elastic/cluster-test/README.md b/vendor/github.com/olivere/elastic/cluster-test/README.md
deleted file mode 100644
index f10748cc2..000000000
--- a/vendor/github.com/olivere/elastic/cluster-test/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# Cluster Test
-
-This directory contains a program you can use to test a cluster.
-
-Here's how:
-
-First, install a cluster of Elasticsearch nodes. You can install them on
-different computers, or start several nodes on a single machine.
-
-Build cluster-test by `go build cluster-test.go` (or build with `make`).
-
-Run `./cluster-test -h` to get a list of flags:
-
-```sh
-$ ./cluster-test -h
-Usage of ./cluster-test:
- -errorlog="": error log file
- -healthcheck=true: enable or disable healthchecks
- -healthchecker=1m0s: healthcheck interval
- -index="twitter": name of ES index to use
- -infolog="": info log file
- -n=5: number of goroutines that run searches
- -nodes="": comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')
- -retries=0: number of retries
- -sniff=true: enable or disable sniffer
- -sniffer=15m0s: sniffer interval
- -tracelog="": trace log file
-```
-
-Example:
-
-```sh
-$ ./cluster-test -nodes=http://127.0.0.1:9200,http://127.0.0.1:9201,http://127.0.0.1:9202 -n=5 -index=twitter -retries=5 -sniff=true -sniffer=10s -healthcheck=true -healthchecker=5s -errorlog=error.log
-```
-
-The above example will create an index and start some search jobs on the
-cluster defined by http://127.0.0.1:9200, http://127.0.0.1:9201,
-and http://127.0.0.1:9202.
-
-* It will create an index called `twitter` on the cluster (`-index=twitter`)
-* It will run 5 search jobs in parallel (`-n=5`).
-* It will retry failed requests 5 times (`-retries=5`).
-* It will sniff the cluster periodically (`-sniff=true`).
-* It will sniff the cluster every 10 seconds (`-sniffer=10s`).
-* It will perform health checks periodically (`-healthcheck=true`).
-* It will perform health checks on the nodes every 5 seconds (`-healthchecker=5s`).
-* It will write an error log file (`-errorlog=error.log`).
-
-If you want to test Elastic with nodes going up and down, you can use a
-chaos monkey script like this and run it on the nodes of your cluster:
-
-```sh
-#!/bin/bash
-while true
-do
- echo "Starting ES node"
- elasticsearch -d -Xmx4g -Xms1g -Des.config=elasticsearch.yml -p es.pid
- sleep `jot -r 1 10 300` # wait for 10-300s
- echo "Stopping ES node"
- kill -TERM `cat es.pid`
- sleep `jot -r 1 10 60` # wait for 10-60s
-done
-```
diff --git a/vendor/github.com/olivere/elastic/cluster-test/cluster-test.go b/vendor/github.com/olivere/elastic/cluster-test/cluster-test.go
deleted file mode 100644
index 96b0c5d9b..000000000
--- a/vendor/github.com/olivere/elastic/cluster-test/cluster-test.go
+++ /dev/null
@@ -1,361 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package main
-
-import (
- "context"
- "encoding/json"
- "errors"
- "flag"
- "fmt"
- "log"
- "math/rand"
- "os"
- "runtime"
- "strings"
- "sync/atomic"
- "time"
-
- elastic "github.com/olivere/elastic"
-)
-
-type Tweet struct {
- User string `json:"user"`
- Message string `json:"message"`
- Retweets int `json:"retweets"`
- Image string `json:"image,omitempty"`
- Created time.Time `json:"created,omitempty"`
- Tags []string `json:"tags,omitempty"`
- Location string `json:"location,omitempty"`
- Suggest *elastic.SuggestField `json:"suggest_field,omitempty"`
-}
-
-var (
- nodes = flag.String("nodes", "", "comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')")
- n = flag.Int("n", 5, "number of goroutines that run searches")
- index = flag.String("index", "twitter", "name of ES index to use")
- errorlogfile = flag.String("errorlog", "", "error log file")
- infologfile = flag.String("infolog", "", "info log file")
- tracelogfile = flag.String("tracelog", "", "trace log file")
- retries = flag.Int("retries", 0, "number of retries")
- sniff = flag.Bool("sniff", elastic.DefaultSnifferEnabled, "enable or disable sniffer")
- sniffer = flag.Duration("sniffer", elastic.DefaultSnifferInterval, "sniffer interval")
- healthcheck = flag.Bool("healthcheck", elastic.DefaultHealthcheckEnabled, "enable or disable healthchecks")
- healthchecker = flag.Duration("healthchecker", elastic.DefaultHealthcheckInterval, "healthcheck interval")
-)
-
-func main() {
- flag.Parse()
-
- runtime.GOMAXPROCS(runtime.NumCPU())
-
- if *nodes == "" {
- log.Fatal("no nodes specified")
- }
- urls := strings.Split(*nodes, ",")
-
- testcase, err := NewTestCase(*index, urls)
- if err != nil {
- log.Fatal(err)
- }
-
- testcase.SetErrorLogFile(*errorlogfile)
- testcase.SetInfoLogFile(*infologfile)
- testcase.SetTraceLogFile(*tracelogfile)
- testcase.SetMaxRetries(*retries)
- testcase.SetHealthcheck(*healthcheck)
- testcase.SetHealthcheckInterval(*healthchecker)
- testcase.SetSniff(*sniff)
- testcase.SetSnifferInterval(*sniffer)
-
- if err := testcase.Run(*n); err != nil {
- log.Fatal(err)
- }
-
- select {}
-}
-
-type RunInfo struct {
- Success bool
-}
-
-type TestCase struct {
- nodes []string
- client *elastic.Client
- runs int64
- failures int64
- runCh chan RunInfo
- index string
- errorlogfile string
- infologfile string
- tracelogfile string
- maxRetries int
- healthcheck bool
- healthcheckInterval time.Duration
- sniff bool
- snifferInterval time.Duration
-}
-
-func NewTestCase(index string, nodes []string) (*TestCase, error) {
- if index == "" {
- return nil, errors.New("no index name specified")
- }
-
- return &TestCase{
- index: index,
- nodes: nodes,
- runCh: make(chan RunInfo),
- }, nil
-}
-
-func (t *TestCase) SetIndex(name string) {
- t.index = name
-}
-
-func (t *TestCase) SetErrorLogFile(name string) {
- t.errorlogfile = name
-}
-
-func (t *TestCase) SetInfoLogFile(name string) {
- t.infologfile = name
-}
-
-func (t *TestCase) SetTraceLogFile(name string) {
- t.tracelogfile = name
-}
-
-func (t *TestCase) SetMaxRetries(n int) {
- t.maxRetries = n
-}
-
-func (t *TestCase) SetSniff(enabled bool) {
- t.sniff = enabled
-}
-
-func (t *TestCase) SetSnifferInterval(d time.Duration) {
- t.snifferInterval = d
-}
-
-func (t *TestCase) SetHealthcheck(enabled bool) {
- t.healthcheck = enabled
-}
-
-func (t *TestCase) SetHealthcheckInterval(d time.Duration) {
- t.healthcheckInterval = d
-}
-
-func (t *TestCase) Run(n int) error {
- if err := t.setup(); err != nil {
- return err
- }
-
- for i := 0; i < n; i++ {
- go t.search()
- }
-
- go t.monitor()
-
- return nil
-}
-
-func (t *TestCase) monitor() {
- print := func() {
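- // Runs are printed in green (\033[32m) and failures in red (\033[31m),
- // followed by the client state; the trailing "\r" rewrites the same line.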
- fmt.Printf("\033[32m%5d\033[0m; \033[31m%5d\033[0m: %s%s\r", t.runs, t.failures, t.client.String(), " ")
- }
-
- for {
- select {
- case run := <-t.runCh:
- atomic.AddInt64(&t.runs, 1)
- if !run.Success {
- atomic.AddInt64(&t.failures, 1)
- fmt.Println()
- }
- print()
- case <-time.After(5 * time.Second):
- // Print stats after some inactivity
- print()
- }
- }
-}
-
-func (t *TestCase) setup() error {
- var errorlogger *log.Logger
- if t.errorlogfile != "" {
- f, err := os.OpenFile(t.errorlogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
- if err != nil {
- return err
- }
- errorlogger = log.New(f, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)
- }
-
- var infologger *log.Logger
- if t.infologfile != "" {
- f, err := os.OpenFile(t.infologfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
- if err != nil {
- return err
- }
- infologger = log.New(f, "", log.LstdFlags)
- }
-
- // Trace request and response details like this
- var tracelogger *log.Logger
- if t.tracelogfile != "" {
- f, err := os.OpenFile(t.tracelogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
- if err != nil {
- return err
- }
- tracelogger = log.New(f, "", log.LstdFlags)
- }
-
- client, err := elastic.NewClient(
- elastic.SetURL(t.nodes...),
- elastic.SetErrorLog(errorlogger),
- elastic.SetInfoLog(infologger),
- elastic.SetTraceLog(tracelogger),
- elastic.SetMaxRetries(t.maxRetries),
- elastic.SetSniff(t.sniff),
- elastic.SetSnifferInterval(t.snifferInterval),
- elastic.SetHealthcheck(t.healthcheck),
- elastic.SetHealthcheckInterval(t.healthcheckInterval))
- if err != nil {
- // Handle error
- return err
- }
- t.client = client
-
- ctx := context.Background()
-
- // Use the IndexExists service to check if a specified index exists.
- exists, err := t.client.IndexExists(t.index).Do(ctx)
- if err != nil {
- return err
- }
- if exists {
- deleteIndex, err := t.client.DeleteIndex(t.index).Do(ctx)
- if err != nil {
- return err
- }
- if !deleteIndex.Acknowledged {
- return errors.New("delete index not acknowledged")
- }
- }
-
- // Create a new index.
- createIndex, err := t.client.CreateIndex(t.index).Do(ctx)
- if err != nil {
- return err
- }
- if !createIndex.Acknowledged {
- return errors.New("create index not acknowledged")
- }
-
- // Index a tweet (using JSON serialization)
- tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
- _, err = t.client.Index().
- Index(t.index).
- Type("tweet").
- Id("1").
- BodyJson(tweet1).
- Do(ctx)
- if err != nil {
- return err
- }
-
- // Index a second tweet (by string)
- tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
- _, err = t.client.Index().
- Index(t.index).
- Type("tweet").
- Id("2").
- BodyString(tweet2).
- Do(ctx)
- if err != nil {
- return err
- }
-
- // Flush to make sure the documents got written.
- _, err = t.client.Flush().Index(t.index).Do(ctx)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (t *TestCase) search() {
- ctx := context.Background()
-
- // Loop forever to check for connection issues
- for {
- // Get tweet with specified ID
- get1, err := t.client.Get().
- Index(t.index).
- Type("tweet").
- Id("1").
- Do(ctx)
- if err != nil {
- //failf("Get failed: %v", err)
- t.runCh <- RunInfo{Success: false}
- continue
- }
- if !get1.Found {
- //log.Printf("Document %s not found\n", "1")
- //fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
- t.runCh <- RunInfo{Success: false}
- continue
- }
-
- // Search with a term query
- searchResult, err := t.client.Search().
- Index(t.index). // search in index t.index
- Query(elastic.NewTermQuery("user", "olivere")). // specify the query
- Sort("user", true). // sort by "user" field, ascending
- From(0).Size(10). // take documents 0-9
- Pretty(true). // pretty print request and response JSON
- Do(ctx) // execute
- if err != nil {
- //failf("Search failed: %v\n", err)
- t.runCh <- RunInfo{Success: false}
- continue
- }
-
- // searchResult is of type SearchResult and returns hits, suggestions,
- // and all kinds of other information from Elasticsearch.
- //fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
-
- // Number of hits
- if searchResult.Hits.TotalHits > 0 {
- //fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
-
- // Iterate through results
- for _, hit := range searchResult.Hits.Hits {
- // hit.Index contains the name of the index
-
- // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
- var tweet Tweet
- err := json.Unmarshal(*hit.Source, &tweet)
- if err != nil {
- // Deserialization failed
- //failf("Deserialize failed: %v\n", err)
- t.runCh <- RunInfo{Success: false}
- continue
- }
-
- // Work with tweet
- //fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
- }
- } else {
- // No hits
- //fmt.Print("Found no tweets\n")
- }
-
- t.runCh <- RunInfo{Success: true}
-
- // Sleep some time
- time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/cluster_health.go b/vendor/github.com/olivere/elastic/cluster_health.go
deleted file mode 100644
index f960cfe8e..000000000
--- a/vendor/github.com/olivere/elastic/cluster_health.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// ClusterHealthService allows getting a very simple status of the health of the cluster.
-//
-// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-health.html
-// for details.
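-//
-// A typical use looks like this (a sketch; assumes an existing *Client
-// named client and a context ctx):
-//
-//   res, err := client.ClusterHealth().WaitForYellowStatus().Timeout("10s").Do(ctx)
-//   if err != nil {
-//       // Handle error
-//   }
-//   fmt.Println(res.Status)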
-type ClusterHealthService struct {
- client *Client
- pretty bool
- indices []string
- level string
- local *bool
- masterTimeout string
- timeout string
- waitForActiveShards *int
- waitForNodes string
- waitForNoRelocatingShards *bool
- waitForStatus string
-}
-
-// NewClusterHealthService creates a new ClusterHealthService.
-func NewClusterHealthService(client *Client) *ClusterHealthService {
- return &ClusterHealthService{
- client: client,
- indices: make([]string, 0),
- }
-}
-
-// Index limits the information returned to specific indices.
-func (s *ClusterHealthService) Index(indices ...string) *ClusterHealthService {
- s.indices = append(s.indices, indices...)
- return s
-}
-
-// Level specifies the level of detail for returned information.
-func (s *ClusterHealthService) Level(level string) *ClusterHealthService {
- s.level = level
- return s
-}
-
-// Local indicates whether to return local information. If it is true,
-// we do not retrieve the state from master node (default: false).
-func (s *ClusterHealthService) Local(local bool) *ClusterHealthService {
- s.local = &local
- return s
-}
-
-// MasterTimeout specifies an explicit operation timeout for connection to master node.
-func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Timeout specifies an explicit operation timeout.
-func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService {
- s.timeout = timeout
- return s
-}
-
-// WaitForActiveShards can be used to wait until the specified number of shards are active.
-func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
- s.waitForActiveShards = &waitForActiveShards
- return s
-}
-
-// WaitForNodes can be used to wait until the specified number of nodes are available.
-// Example: "12" to wait for exact values, ">12" and "<12" for ranges.
-func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
- s.waitForNodes = waitForNodes
- return s
-}
-
-// WaitForNoRelocatingShards can be used to wait until all shard relocations are finished.
-func (s *ClusterHealthService) WaitForNoRelocatingShards(waitForNoRelocatingShards bool) *ClusterHealthService {
- s.waitForNoRelocatingShards = &waitForNoRelocatingShards
- return s
-}
-
-// WaitForStatus can be used to wait until the cluster is in a specific state.
-// Valid values are: green, yellow, or red.
-func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
- s.waitForStatus = waitForStatus
- return s
-}
-
-// WaitForGreenStatus will wait for the "green" state.
-func (s *ClusterHealthService) WaitForGreenStatus() *ClusterHealthService {
- return s.WaitForStatus("green")
-}
-
-// WaitForYellowStatus will wait for the "yellow" state.
-func (s *ClusterHealthService) WaitForYellowStatus() *ClusterHealthService {
- return s.WaitForStatus("yellow")
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *ClusterHealthService) Pretty(pretty bool) *ClusterHealthService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
- if len(s.indices) > 0 {
- path, err = uritemplates.Expand("/_cluster/health/{index}", map[string]string{
- "index": strings.Join(s.indices, ","),
- })
- } else {
- path = "/_cluster/health"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.level != "" {
- params.Set("level", s.level)
- }
- if s.local != nil {
- params.Set("local", fmt.Sprintf("%v", *s.local))
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.waitForActiveShards != nil {
- params.Set("wait_for_active_shards", fmt.Sprintf("%v", s.waitForActiveShards))
- }
- if s.waitForNodes != "" {
- params.Set("wait_for_nodes", s.waitForNodes)
- }
- if s.waitForNoRelocatingShards != nil {
- params.Set("wait_for_no_relocating_shards", fmt.Sprintf("%v", *s.waitForNoRelocatingShards))
- }
- if s.waitForStatus != "" {
- params.Set("wait_for_status", s.waitForStatus)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *ClusterHealthService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *ClusterHealthService) Do(ctx context.Context) (*ClusterHealthResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(ClusterHealthResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// ClusterHealthResponse is the response of ClusterHealthService.Do.
-type ClusterHealthResponse struct {
- ClusterName string `json:"cluster_name"`
- Status string `json:"status"`
- TimedOut bool `json:"timed_out"`
- NumberOfNodes int `json:"number_of_nodes"`
- NumberOfDataNodes int `json:"number_of_data_nodes"`
- ActivePrimaryShards int `json:"active_primary_shards"`
- ActiveShards int `json:"active_shards"`
- RelocatingShards int `json:"relocating_shards"`
- InitializingShards int `json:"initializing_shards"`
- UnassignedShards int `json:"unassigned_shards"`
- DelayedUnassignedShards int `json:"delayed_unassigned_shards"`
- NumberOfPendingTasks int `json:"number_of_pending_tasks"`
- NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"`
- TaskMaxWaitTimeInQueueInMillis int `json:"task_max_waiting_in_queue_millis"`
- ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"`
-
- // Validation failures -> index name -> array of validation failures
- ValidationFailures []map[string][]string `json:"validation_failures"`
-
- // Index name -> index health
- Indices map[string]*ClusterIndexHealth `json:"indices"`
-}
-
-// ClusterIndexHealth will be returned as part of ClusterHealthResponse.
-type ClusterIndexHealth struct {
- Status string `json:"status"`
- NumberOfShards int `json:"number_of_shards"`
- NumberOfReplicas int `json:"number_of_replicas"`
- ActivePrimaryShards int `json:"active_primary_shards"`
- ActiveShards int `json:"active_shards"`
- RelocatingShards int `json:"relocating_shards"`
- InitializingShards int `json:"initializing_shards"`
- UnassignedShards int `json:"unassigned_shards"`
- // Validation failures
- ValidationFailures []string `json:"validation_failures"`
- // Shards by id, e.g. "0" or "1"
- Shards map[string]*ClusterShardHealth `json:"shards"`
-}
-
-// ClusterShardHealth will be returned as part of ClusterHealthResponse.
-type ClusterShardHealth struct {
- Status string `json:"status"`
- PrimaryActive bool `json:"primary_active"`
- ActiveShards int `json:"active_shards"`
- RelocatingShards int `json:"relocating_shards"`
- InitializingShards int `json:"initializing_shards"`
- UnassignedShards int `json:"unassigned_shards"`
-}
diff --git a/vendor/github.com/olivere/elastic/cluster_health_test.go b/vendor/github.com/olivere/elastic/cluster_health_test.go
deleted file mode 100644
index c2caee985..000000000
--- a/vendor/github.com/olivere/elastic/cluster_health_test.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "net/url"
- "testing"
-)
-
-func TestClusterHealth(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- // Get cluster health
- res, err := client.ClusterHealth().Index(testIndexName).Level("shards").Pretty(true).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatalf("expected res to be != nil; got: %v", res)
- }
- if res.Status != "green" && res.Status != "red" && res.Status != "yellow" {
- t.Fatalf("expected status \"green\", \"red\", or \"yellow\"; got: %q", res.Status)
- }
-}
-
-func TestClusterHealthURLs(t *testing.T) {
- tests := []struct {
- Service *ClusterHealthService
- ExpectedPath string
- ExpectedParams url.Values
- }{
- {
- Service: &ClusterHealthService{
- indices: []string{},
- },
- ExpectedPath: "/_cluster/health",
- },
- {
- Service: &ClusterHealthService{
- indices: []string{"twitter"},
- },
- ExpectedPath: "/_cluster/health/twitter",
- },
- {
- Service: &ClusterHealthService{
- indices: []string{"twitter", "gplus"},
- },
- ExpectedPath: "/_cluster/health/twitter%2Cgplus",
- },
- {
- Service: &ClusterHealthService{
- indices: []string{"twitter"},
- waitForStatus: "yellow",
- },
- ExpectedPath: "/_cluster/health/twitter",
- ExpectedParams: url.Values{"wait_for_status": []string{"yellow"}},
- },
- }
-
- for _, test := range tests {
- gotPath, gotParams, err := test.Service.buildURL()
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if gotPath != test.ExpectedPath {
- t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
- }
- if gotParams.Encode() != test.ExpectedParams.Encode() {
- t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
- }
- }
-}
-
-func TestClusterHealthWaitForStatus(t *testing.T) {
- client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
-
- // Ensure preconditions are met: A green cluster.
- health, err := client.ClusterHealth().Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if got, want := health.Status, "green"; got != want {
- t.Skipf("precondition failed: expected cluster to be %q, not %q", want, got)
- }
-
- // Cluster health on an index that does not exist should never get to yellow
- health, err = client.ClusterHealth().Index("no-such-index").WaitForStatus("yellow").Timeout("1s").Do(context.TODO())
- if err == nil {
- t.Fatalf("expected timeout error; got: %v", err)
- }
- if !IsTimeout(err) {
- t.Fatalf("expected timeout error; got: %v", err)
- }
- if health != nil {
- t.Fatalf("expected no response; got: %v", health)
- }
-
- // Cluster wide health
- health, err = client.ClusterHealth().WaitForGreenStatus().Timeout("10s").Do(context.TODO())
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if health.TimedOut != false {
- t.Fatalf("expected no timeout; got: %v "+
- "(does your local cluster contain unassigned shards?)", health.TimedOut)
- }
- if health.Status != "green" {
- t.Fatalf("expected health = %q; got: %q", "green", health.Status)
- }
-
- // Cluster wide health via shortcut on client
- err = client.WaitForGreenStatus("10s")
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/cluster_state.go b/vendor/github.com/olivere/elastic/cluster_state.go
deleted file mode 100644
index 54e9aa428..000000000
--- a/vendor/github.com/olivere/elastic/cluster_state.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// ClusterStateService allows getting comprehensive state information for the whole cluster.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/cluster-state.html
-// for details.
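-//
-// A typical use looks like this (a sketch; assumes an existing *Client
-// named client and a context ctx):
-//
-//   res, err := client.ClusterState().Index("_all").Metric("_all").Do(ctx)
-//   if err != nil {
-//       // Handle error
-//   }
-//   fmt.Println(res.ClusterName)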
-type ClusterStateService struct {
- client *Client
- pretty bool
- indices []string
- metrics []string
- allowNoIndices *bool
- expandWildcards string
- flatSettings *bool
- ignoreUnavailable *bool
- local *bool
- masterTimeout string
-}
-
-// NewClusterStateService creates a new ClusterStateService.
-func NewClusterStateService(client *Client) *ClusterStateService {
- return &ClusterStateService{
- client: client,
- indices: make([]string, 0),
- metrics: make([]string, 0),
- }
-}
-
-// Index is a list of index names. Use _all or an empty string to
-// perform the operation on all indices.
-func (s *ClusterStateService) Index(indices ...string) *ClusterStateService {
- s.indices = append(s.indices, indices...)
- return s
-}
-
-// Metric limits the information returned to the specified metric.
-// It can be one of: version, master_node, nodes, routing_table, metadata,
-// blocks, or customs.
-func (s *ClusterStateService) Metric(metrics ...string) *ClusterStateService {
- s.metrics = append(s.metrics, metrics...)
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// (This includes `_all` string or when no indices have been specified).
-func (s *ClusterStateService) AllowNoIndices(allowNoIndices bool) *ClusterStateService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expressions to
-// concrete indices that are open, closed, or both.
-func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterStateService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// FlatSettings, when set, returns settings in flat format (default: false).
-func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
- s.flatSettings = &flatSettings
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *ClusterStateService) IgnoreUnavailable(ignoreUnavailable bool) *ClusterStateService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// Local indicates whether to return local information. When set, it does not
-// retrieve the state from master node (default: false).
-func (s *ClusterStateService) Local(local bool) *ClusterStateService {
- s.local = &local
- return s
-}
-
-// MasterTimeout specifies timeout for connection to master.
-func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *ClusterStateService) Pretty(pretty bool) *ClusterStateService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *ClusterStateService) buildURL() (string, url.Values, error) {
- // Build URL
- metrics := strings.Join(s.metrics, ",")
- if metrics == "" {
- metrics = "_all"
- }
- indices := strings.Join(s.indices, ",")
- if indices == "" {
- indices = "_all"
- }
- path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{
- "metrics": metrics,
- "indices": indices,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.flatSettings != nil {
- params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.local != nil {
- params.Set("local", fmt.Sprintf("%v", *s.local))
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *ClusterStateService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *ClusterStateService) Do(ctx context.Context) (*ClusterStateResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(ClusterStateResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// ClusterStateResponse is the response of ClusterStateService.Do.
-type ClusterStateResponse struct {
- ClusterName string `json:"cluster_name"`
- Version int64 `json:"version"`
- StateUUID string `json:"state_uuid"`
- MasterNode string `json:"master_node"`
- Blocks map[string]*clusterBlocks `json:"blocks"`
- Nodes map[string]*discoveryNode `json:"nodes"`
- Metadata *clusterStateMetadata `json:"metadata"`
- RoutingTable map[string]*clusterStateRoutingTable `json:"routing_table"`
- RoutingNodes *clusterStateRoutingNode `json:"routing_nodes"`
- Customs map[string]interface{} `json:"customs"`
-}
-
-type clusterBlocks struct {
- Global map[string]*clusterBlock `json:"global"` // id -> cluster block
- Indices map[string]*clusterBlock `json:"indices"` // index name -> cluster block
-}
-
-type clusterBlock struct {
- Description string `json:"description"`
- Retryable bool `json:"retryable"`
- DisableStatePersistence bool `json:"disable_state_persistence"`
- Levels []string `json:"levels"`
-}
-
-type clusterStateMetadata struct {
- ClusterUUID string `json:"cluster_uuid"`
- Templates map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata
- Indices map[string]*indexMetaData `json:"indices"` // index name -> meta data
- RoutingTable struct {
- Indices map[string]*indexRoutingTable `json:"indices"` // index name -> routing table
- } `json:"routing_table"`
- RoutingNodes struct {
- Unassigned []*shardRouting `json:"unassigned"`
- Nodes []*shardRouting `json:"nodes"`
- } `json:"routing_nodes"`
- Customs map[string]interface{} `json:"customs"`
-}
-
-type discoveryNode struct {
- Name string `json:"name"` // server name, e.g. "es1"
- TransportAddress string `json:"transport_address"` // e.g. inet[/1.2.3.4:9300]
- Attributes map[string]interface{} `json:"attributes"` // e.g. { "data": true, "master": true }
-}
-
-type clusterStateRoutingTable struct {
- Indices map[string]interface{} `json:"indices"`
-}
-
-type clusterStateRoutingNode struct {
- Unassigned []*shardRouting `json:"unassigned"`
- // Node Id -> shardRouting
- Nodes map[string][]*shardRouting `json:"nodes"`
-}
-
-type indexTemplateMetaData struct {
- IndexPatterns []string `json:"index_patterns"` // e.g. ["store-*"]
- Order int `json:"order"`
- Settings map[string]interface{} `json:"settings"` // index settings
- Mappings map[string]interface{} `json:"mappings"` // type name -> mapping
-}
-
-type indexMetaData struct {
- State string `json:"state"`
- Settings map[string]interface{} `json:"settings"`
- Mappings map[string]interface{} `json:"mappings"`
- Aliases []string `json:"aliases"` // e.g. [ "alias1", "alias2" ]
-}
-
-type indexRoutingTable struct {
- Shards map[string]*shardRouting `json:"shards"`
-}
-
-type shardRouting struct {
- State string `json:"state"`
- Primary bool `json:"primary"`
- Node string `json:"node"`
- RelocatingNode string `json:"relocating_node"`
- Shard int `json:"shard"`
- Index string `json:"index"`
- Version int64 `json:"version"`
- RestoreSource *RestoreSource `json:"restore_source"`
- AllocationId *allocationId `json:"allocation_id"`
- UnassignedInfo *unassignedInfo `json:"unassigned_info"`
-}
-
-type RestoreSource struct {
- Repository string `json:"repository"`
- Snapshot string `json:"snapshot"`
- Version string `json:"version"`
- Index string `json:"index"`
-}
-
-type allocationId struct {
- Id string `json:"id"`
- RelocationId string `json:"relocation_id"`
-}
-
-type unassignedInfo struct {
- Reason string `json:"reason"`
- At string `json:"at"`
- Details string `json:"details"`
-}
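
Before the next removed file: the ClusterStateService deleted above is driven through its fluent setters and Do. A minimal usage sketch follows, mirroring the call chain in the test below; NewClient, SetURL and SetSniff and a node at http://127.0.0.1:9200 are assumptions, since they are not part of this hunk.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// Assumed: a single local node, so sniffing is switched off.
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"),
		elastic.SetSniff(false),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Ask for the complete state of all indices and all metrics.
	res, err := client.ClusterState().Index("_all").Metric("_all").Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cluster %q, state version %d, master node %s, %d known node(s)\n",
		res.ClusterName, res.Version, res.MasterNode, len(res.Nodes))
}
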
diff --git a/vendor/github.com/olivere/elastic/cluster_state_test.go b/vendor/github.com/olivere/elastic/cluster_state_test.go
deleted file mode 100644
index 6eedb0c1b..000000000
--- a/vendor/github.com/olivere/elastic/cluster_state_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "net/url"
- "testing"
-)
-
-func TestClusterState(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- // Get cluster state
- res, err := client.ClusterState().Index("_all").Metric("_all").Pretty(true).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatalf("expected res to be != nil; got: %v", res)
- }
- if res.ClusterName == "" {
- t.Fatalf("expected a cluster name; got: %q", res.ClusterName)
- }
-}
-
-func TestClusterStateURLs(t *testing.T) {
- tests := []struct {
- Service *ClusterStateService
- ExpectedPath string
- ExpectedParams url.Values
- }{
- {
- Service: &ClusterStateService{
- indices: []string{},
- metrics: []string{},
- },
- ExpectedPath: "/_cluster/state/_all/_all",
- },
- {
- Service: &ClusterStateService{
- indices: []string{"twitter"},
- metrics: []string{},
- },
- ExpectedPath: "/_cluster/state/_all/twitter",
- },
- {
- Service: &ClusterStateService{
- indices: []string{"twitter", "gplus"},
- metrics: []string{},
- },
- ExpectedPath: "/_cluster/state/_all/twitter%2Cgplus",
- },
- {
- Service: &ClusterStateService{
- indices: []string{},
- metrics: []string{"nodes"},
- },
- ExpectedPath: "/_cluster/state/nodes/_all",
- },
- {
- Service: &ClusterStateService{
- indices: []string{"twitter"},
- metrics: []string{"nodes"},
- },
- ExpectedPath: "/_cluster/state/nodes/twitter",
- },
- {
- Service: &ClusterStateService{
- indices: []string{"twitter"},
- metrics: []string{"nodes"},
- masterTimeout: "1s",
- },
- ExpectedPath: "/_cluster/state/nodes/twitter",
- ExpectedParams: url.Values{"master_timeout": []string{"1s"}},
- },
- }
-
- for _, test := range tests {
- gotPath, gotParams, err := test.Service.buildURL()
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if gotPath != test.ExpectedPath {
- t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
- }
- if gotParams.Encode() != test.ExpectedParams.Encode() {
- t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/cluster_stats.go b/vendor/github.com/olivere/elastic/cluster_stats.go
deleted file mode 100644
index 4d05c2e97..000000000
--- a/vendor/github.com/olivere/elastic/cluster_stats.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// ClusterStatsService is documented at
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/cluster-stats.html.
-type ClusterStatsService struct {
- client *Client
- pretty bool
- nodeId []string
- flatSettings *bool
- human *bool
-}
-
-// NewClusterStatsService creates a new ClusterStatsService.
-func NewClusterStatsService(client *Client) *ClusterStatsService {
- return &ClusterStatsService{
- client: client,
- nodeId: make([]string, 0),
- }
-}
-
-// NodeId is documented as: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes.
-func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService {
- s.nodeId = nodeId
- return s
-}
-
-// FlatSettings is documented as: Return settings in flat format (default: false).
-func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService {
- s.flatSettings = &flatSettings
- return s
-}
-
-// Human is documented as: Whether to return time and byte values in human-readable format.
-func (s *ClusterStatsService) Human(human bool) *ClusterStatsService {
- s.human = &human
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *ClusterStatsService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
-
- if len(s.nodeId) > 0 {
- path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{
- "node_id": strings.Join(s.nodeId, ","),
- })
- if err != nil {
- return "", url.Values{}, err
- }
- } else {
- path, err = uritemplates.Expand("/_cluster/stats", map[string]string{})
- if err != nil {
- return "", url.Values{}, err
- }
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.flatSettings != nil {
- params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
- }
- if s.human != nil {
- params.Set("human", fmt.Sprintf("%v", *s.human))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *ClusterStatsService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *ClusterStatsService) Do(ctx context.Context) (*ClusterStatsResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(ClusterStatsResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// ClusterStatsResponse is the response of ClusterStatsService.Do.
-type ClusterStatsResponse struct {
- Timestamp int64 `json:"timestamp"`
- ClusterName string `json:"cluster_name"`
- ClusterUUID string `json:"uuid"`
- Status string `json:"status"`
- Indices *ClusterStatsIndices `json:"indices"`
- Nodes *ClusterStatsNodes `json:"nodes"`
-}
-
-type ClusterStatsIndices struct {
- Count int `json:"count"`
- Shards *ClusterStatsIndicesShards `json:"shards"`
- Docs *ClusterStatsIndicesDocs `json:"docs"`
- Store *ClusterStatsIndicesStore `json:"store"`
- FieldData *ClusterStatsIndicesFieldData `json:"fielddata"`
- FilterCache *ClusterStatsIndicesFilterCache `json:"filter_cache"`
- IdCache *ClusterStatsIndicesIdCache `json:"id_cache"`
- Completion *ClusterStatsIndicesCompletion `json:"completion"`
- Segments *ClusterStatsIndicesSegments `json:"segments"`
- Percolate *ClusterStatsIndicesPercolate `json:"percolate"`
-}
-
-type ClusterStatsIndicesShards struct {
- Total int `json:"total"`
- Primaries int `json:"primaries"`
- Replication float64 `json:"replication"`
- Index *ClusterStatsIndicesShardsIndex `json:"index"`
-}
-
-type ClusterStatsIndicesShardsIndex struct {
- Shards *ClusterStatsIndicesShardsIndexIntMinMax `json:"shards"`
- Primaries *ClusterStatsIndicesShardsIndexIntMinMax `json:"primaries"`
- Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"`
-}
-
-type ClusterStatsIndicesShardsIndexIntMinMax struct {
- Min int `json:"min"`
- Max int `json:"max"`
- Avg float64 `json:"avg"`
-}
-
-type ClusterStatsIndicesShardsIndexFloat64MinMax struct {
- Min float64 `json:"min"`
- Max float64 `json:"max"`
- Avg float64 `json:"avg"`
-}
-
-type ClusterStatsIndicesDocs struct {
- Count int `json:"count"`
- Deleted int `json:"deleted"`
-}
-
-type ClusterStatsIndicesStore struct {
- Size string `json:"size"` // e.g. "5.3gb"
- SizeInBytes int64 `json:"size_in_bytes"`
-}
-
-type ClusterStatsIndicesFieldData struct {
- MemorySize string `json:"memory_size"` // e.g. "61.3kb"
- MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
- Evictions int64 `json:"evictions"`
- Fields map[string]struct {
- MemorySize string `json:"memory_size"` // e.g. "61.3kb"
- MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
- } `json:"fields"`
-}
-
-type ClusterStatsIndicesFilterCache struct {
- MemorySize string `json:"memory_size"` // e.g. "61.3kb"
- MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
- Evictions int64 `json:"evictions"`
-}
-
-type ClusterStatsIndicesIdCache struct {
- MemorySize string `json:"memory_size"` // e.g. "61.3kb"
- MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
-}
-
-type ClusterStatsIndicesCompletion struct {
- Size string `json:"size"` // e.g. "61.3kb"
- SizeInBytes int64 `json:"size_in_bytes"`
- Fields map[string]struct {
- Size string `json:"size"` // e.g. "61.3kb"
- SizeInBytes int64 `json:"size_in_bytes"`
- } `json:"fields"`
-}
-
-type ClusterStatsIndicesSegments struct {
- Count int64 `json:"count"`
- Memory string `json:"memory"` // e.g. "61.3kb"
- MemoryInBytes int64 `json:"memory_in_bytes"`
- IndexWriterMemory string `json:"index_writer_memory"` // e.g. "61.3kb"
- IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"`
- IndexWriterMaxMemory string `json:"index_writer_max_memory"` // e.g. "61.3kb"
- IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"`
- VersionMapMemory string `json:"version_map_memory"` // e.g. "61.3kb"
- VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"`
- FixedBitSet string `json:"fixed_bit_set"` // e.g. "61.3kb"
- FixedBitSetInBytes int64 `json:"fixed_bit_set_memory_in_bytes"`
-}
-
-type ClusterStatsIndicesPercolate struct {
- Total int64 `json:"total"`
- // TODO(oe) The JSON tag here is wrong as of ES 1.5.2 it seems
- Time string `json:"get_time"` // e.g. "1s"
- TimeInBytes int64 `json:"time_in_millis"`
- Current int64 `json:"current"`
- MemorySize string `json:"memory_size"` // e.g. "61.3kb"
- MemorySizeInBytes int64 `json:"memory_sitze_in_bytes"`
- Queries int64 `json:"queries"`
-}
-
-// ---
-
-type ClusterStatsNodes struct {
- Count *ClusterStatsNodesCount `json:"count"`
- Versions []string `json:"versions"`
- OS *ClusterStatsNodesOsStats `json:"os"`
- Process *ClusterStatsNodesProcessStats `json:"process"`
- JVM *ClusterStatsNodesJvmStats `json:"jvm"`
- FS *ClusterStatsNodesFsStats `json:"fs"`
- Plugins []*ClusterStatsNodesPlugin `json:"plugins"`
-}
-
-type ClusterStatsNodesCount struct {
- Total int `json:"total"`
- Data int `json:"data"`
- CoordinatingOnly int `json:"coordinating_only"`
- Master int `json:"master"`
- Ingest int `json:"ingest"`
-}
-
-type ClusterStatsNodesOsStats struct {
- AvailableProcessors int `json:"available_processors"`
- Mem *ClusterStatsNodesOsStatsMem `json:"mem"`
- CPU []*ClusterStatsNodesOsStatsCPU `json:"cpu"`
-}
-
-type ClusterStatsNodesOsStatsMem struct {
- Total string `json:"total"` // e.g. "16gb"
- TotalInBytes int64 `json:"total_in_bytes"`
-}
-
-type ClusterStatsNodesOsStatsCPU struct {
- Vendor string `json:"vendor"`
- Model string `json:"model"`
- MHz int `json:"mhz"`
- TotalCores int `json:"total_cores"`
- TotalSockets int `json:"total_sockets"`
- CoresPerSocket int `json:"cores_per_socket"`
- CacheSize string `json:"cache_size"` // e.g. "256b"
- CacheSizeInBytes int64 `json:"cache_size_in_bytes"`
- Count int `json:"count"`
-}
-
-type ClusterStatsNodesProcessStats struct {
- CPU *ClusterStatsNodesProcessStatsCPU `json:"cpu"`
- OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"`
-}
-
-type ClusterStatsNodesProcessStatsCPU struct {
- Percent float64 `json:"percent"`
-}
-
-type ClusterStatsNodesProcessStatsOpenFileDescriptors struct {
- Min int64 `json:"min"`
- Max int64 `json:"max"`
- Avg int64 `json:"avg"`
-}
-
-type ClusterStatsNodesJvmStats struct {
- MaxUptime string `json:"max_uptime"` // e.g. "5h"
- MaxUptimeInMillis int64 `json:"max_uptime_in_millis"`
- Versions []*ClusterStatsNodesJvmStatsVersion `json:"versions"`
- Mem *ClusterStatsNodesJvmStatsMem `json:"mem"`
- Threads int64 `json:"threads"`
-}
-
-type ClusterStatsNodesJvmStatsVersion struct {
- Version string `json:"version"` // e.g. "1.8.0_45"
- VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM"
- VMVersion string `json:"vm_version"` // e.g. "25.45-b02"
- VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation"
- Count int `json:"count"`
-}
-
-type ClusterStatsNodesJvmStatsMem struct {
- HeapUsed string `json:"heap_used"`
- HeapUsedInBytes int64 `json:"heap_used_in_bytes"`
- HeapMax string `json:"heap_max"`
- HeapMaxInBytes int64 `json:"heap_max_in_bytes"`
-}
-
-type ClusterStatsNodesFsStats struct {
- Path string `json:"path"`
- Mount string `json:"mount"`
- Dev string `json:"dev"`
- Total string `json:"total"` // e.g. "930.7gb"
- TotalInBytes int64 `json:"total_in_bytes"`
- Free string `json:"free"` // e.g. "930.7gb"
- FreeInBytes int64 `json:"free_in_bytes"`
- Available string `json:"available"` // e.g. "930.7gb"
- AvailableInBytes int64 `json:"available_in_bytes"`
- DiskReads int64 `json:"disk_reads"`
- DiskWrites int64 `json:"disk_writes"`
- DiskIOOp int64 `json:"disk_io_op"`
- DiskReadSize string `json:"disk_read_size"` // e.g. "0b"
- DiskReadSizeInBytes int64 `json:"disk_read_size_in_bytes"`
- DiskWriteSize string `json:"disk_write_size"` // e.g. "0b"
- DiskWriteSizeInBytes int64 `json:"disk_write_size_in_bytes"`
- DiskIOSize string `json:"disk_io_size"` // e.g. "0b"
- DiskIOSizeInBytes int64 `json:"disk_io_size_in_bytes"`
- DiskQueue string `json:"disk_queue"`
- DiskServiceTime string `json:"disk_service_time"`
-}
-
-type ClusterStatsNodesPlugin struct {
- Name string `json:"name"`
- Version string `json:"version"`
- Description string `json:"description"`
- URL string `json:"url"`
- JVM bool `json:"jvm"`
- Site bool `json:"site"`
-}
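
As with cluster state, the removed ClusterStatsService is used through a short builder chain. A sketch under the same assumptions (local node; NewClient, SetURL and SetSniff come from elsewhere in the package):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"), elastic.SetSniff(false))
	if err != nil {
		log.Fatal(err)
	}

	// No NodeId filter, so this hits /_cluster/stats for every node;
	// Human(true) asks for the human-readable size strings as well.
	res, err := client.ClusterStats().Human(true).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cluster %s is %s: %d node(s), %d index/indices\n",
		res.ClusterName, res.Status, res.Nodes.Count.Total, res.Indices.Count)
}
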
diff --git a/vendor/github.com/olivere/elastic/cluster_stats_test.go b/vendor/github.com/olivere/elastic/cluster_stats_test.go
deleted file mode 100644
index fe6da4704..000000000
--- a/vendor/github.com/olivere/elastic/cluster_stats_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "net/url"
- "testing"
-)
-
-func TestClusterStats(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- // Get cluster stats
- res, err := client.ClusterStats().Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatalf("expected res to be != nil; got: %v", res)
- }
- if res.ClusterName == "" {
- t.Fatalf("expected a cluster name; got: %q", res.ClusterName)
- }
- if res.Nodes == nil {
- t.Fatalf("expected nodes; got: %v", res.Nodes)
- }
- if res.Nodes.Count == nil {
- t.Fatalf("expected nodes count; got: %v", res.Nodes.Count)
- }
-}
-
-func TestClusterStatsURLs(t *testing.T) {
- fFlag := false
- tFlag := true
-
- tests := []struct {
- Service *ClusterStatsService
- ExpectedPath string
- ExpectedParams url.Values
- }{
- {
- Service: &ClusterStatsService{
- nodeId: []string{},
- },
- ExpectedPath: "/_cluster/stats",
- },
- {
- Service: &ClusterStatsService{
- nodeId: []string{"node1"},
- },
- ExpectedPath: "/_cluster/stats/nodes/node1",
- },
- {
- Service: &ClusterStatsService{
- nodeId: []string{"node1", "node2"},
- },
- ExpectedPath: "/_cluster/stats/nodes/node1%2Cnode2",
- },
- {
- Service: &ClusterStatsService{
- nodeId: []string{},
- flatSettings: &tFlag,
- },
- ExpectedPath: "/_cluster/stats",
- ExpectedParams: url.Values{"flat_settings": []string{"true"}},
- },
- {
- Service: &ClusterStatsService{
- nodeId: []string{"node1"},
- flatSettings: &fFlag,
- },
- ExpectedPath: "/_cluster/stats/nodes/node1",
- ExpectedParams: url.Values{"flat_settings": []string{"false"}},
- },
- }
-
- for _, test := range tests {
- gotPath, gotParams, err := test.Service.buildURL()
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if gotPath != test.ExpectedPath {
- t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
- }
- if gotParams.Encode() != test.ExpectedParams.Encode() {
- t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/config/config.go b/vendor/github.com/olivere/elastic/config/config.go
deleted file mode 100644
index a511c4157..000000000
--- a/vendor/github.com/olivere/elastic/config/config.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package config
-
-import (
- "fmt"
- "net/url"
- "strconv"
- "strings"
-)
-
-// Config represents an Elasticsearch configuration.
-type Config struct {
- URL string
- Index string
- Username string
- Password string
- Shards int
- Replicas int
- Sniff *bool
- Infolog string
- Errorlog string
- Tracelog string
-}
-
-// Parse returns the Elasticsearch configuration by extracting it
-// from the URL, its path, and its query string.
-//
-// Example:
-// http://127.0.0.1:9200/store-blobs?shards=1&replicas=0&sniff=false&tracelog=elastic.trace.log
-//
-// The example URL above is parsed into a URL of http://127.0.0.1:9200, an index name
-// of store-blobs, and the related settings taken from the query string.
-func Parse(elasticURL string) (*Config, error) {
- cfg := &Config{
- Shards: 1,
- Replicas: 0,
- Sniff: nil,
- }
-
- uri, err := url.Parse(elasticURL)
- if err != nil {
- return nil, fmt.Errorf("error parsing elastic parameter %q: %v", elasticURL, err)
- }
- index := uri.Path
- if strings.HasPrefix(index, "/") {
- index = index[1:]
- }
- if strings.HasSuffix(index, "/") {
- index = index[:len(index)-1]
- }
- if index == "" {
- return nil, fmt.Errorf("missing index in elastic parameter %q", elasticURL)
- }
- if uri.User != nil {
- cfg.Username = uri.User.Username()
- cfg.Password, _ = uri.User.Password()
- }
- uri.User = nil
-
- if i, err := strconv.Atoi(uri.Query().Get("shards")); err == nil {
- cfg.Shards = i
- }
- if i, err := strconv.Atoi(uri.Query().Get("replicas")); err == nil {
- cfg.Replicas = i
- }
- if s := uri.Query().Get("sniff"); s != "" {
- if b, err := strconv.ParseBool(s); err == nil {
- cfg.Sniff = &b
- }
- }
- if s := uri.Query().Get("infolog"); s != "" {
- cfg.Infolog = s
- }
- if s := uri.Query().Get("errorlog"); s != "" {
- cfg.Errorlog = s
- }
- if s := uri.Query().Get("tracelog"); s != "" {
- cfg.Tracelog = s
- }
-
- uri.Path = ""
- uri.RawQuery = ""
- cfg.URL = uri.String()
- cfg.Index = index
-
- return cfg, nil
-}
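
The removed config.Parse packs the whole connection setup into one URL. A minimal sketch of what it returns for a typical value (the credentials and index name here are hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/olivere/elastic/config"
)

func main() {
	// Path -> index name, query string -> optional settings, userinfo -> credentials.
	cfg, err := config.Parse("http://user:secret@127.0.0.1:9200/store-blobs?shards=2&replicas=1&sniff=false")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(cfg.URL)      // http://127.0.0.1:9200 (credentials, path and query stripped)
	fmt.Println(cfg.Index)    // store-blobs
	fmt.Println(cfg.Username) // user
	fmt.Println(cfg.Shards, cfg.Replicas, *cfg.Sniff) // 2 1 false
}
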
diff --git a/vendor/github.com/olivere/elastic/config/config_test.go b/vendor/github.com/olivere/elastic/config/config_test.go
deleted file mode 100644
index caa3bbadb..000000000
--- a/vendor/github.com/olivere/elastic/config/config_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package config
-
-import "testing"
-
-func TestParse(t *testing.T) {
- urls := "http://user:pwd@elastic:19220/store-blobs?shards=5&replicas=2&sniff=true&errorlog=elastic.error.log&infolog=elastic.info.log&tracelog=elastic.trace.log"
- cfg, err := Parse(urls)
- if err != nil {
- t.Fatal(err)
- }
- if want, got := "http://elastic:19220", cfg.URL; want != got {
- t.Fatalf("expected URL = %q, got %q", want, got)
- }
- if want, got := "store-blobs", cfg.Index; want != got {
- t.Fatalf("expected Index = %q, got %q", want, got)
- }
- if want, got := "user", cfg.Username; want != got {
- t.Fatalf("expected Username = %q, got %q", want, got)
- }
- if want, got := "pwd", cfg.Password; want != got {
- t.Fatalf("expected Password = %q, got %q", want, got)
- }
- if want, got := 5, cfg.Shards; want != got {
- t.Fatalf("expected Shards = %v, got %v", want, got)
- }
- if want, got := 2, cfg.Replicas; want != got {
- t.Fatalf("expected Replicas = %v, got %v", want, got)
- }
- if want, got := true, *cfg.Sniff; want != got {
- t.Fatalf("expected Sniff = %v, got %v", want, got)
- }
- if want, got := "elastic.error.log", cfg.Errorlog; want != got {
- t.Fatalf("expected Errorlog = %q, got %q", want, got)
- }
- if want, got := "elastic.info.log", cfg.Infolog; want != got {
- t.Fatalf("expected Infolog = %q, got %q", want, got)
- }
- if want, got := "elastic.trace.log", cfg.Tracelog; want != got {
- t.Fatalf("expected Tracelog = %q, got %q", want, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/config/doc.go b/vendor/github.com/olivere/elastic/config/doc.go
deleted file mode 100644
index c9acd5ff1..000000000
--- a/vendor/github.com/olivere/elastic/config/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-/*
-Package config allows parsing a configuration for Elasticsearch
-from a URL.
-*/
-package config
diff --git a/vendor/github.com/olivere/elastic/connection.go b/vendor/github.com/olivere/elastic/connection.go
deleted file mode 100644
index 0f27a8756..000000000
--- a/vendor/github.com/olivere/elastic/connection.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "fmt"
- "sync"
- "time"
-)
-
-// conn represents a single connection to a node in a cluster.
-type conn struct {
- sync.RWMutex
- nodeID string // node ID
- url string
- failures int
- dead bool
- deadSince *time.Time
-}
-
-// newConn creates a new connection to the given URL.
-func newConn(nodeID, url string) *conn {
- c := &conn{
- nodeID: nodeID,
- url: url,
- }
- return c
-}
-
-// String returns a representation of the connection status.
-func (c *conn) String() string {
- c.RLock()
- defer c.RUnlock()
- return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince)
-}
-
-// NodeID returns the ID of the node of this connection.
-func (c *conn) NodeID() string {
- c.RLock()
- defer c.RUnlock()
- return c.nodeID
-}
-
-// URL returns the URL of this connection.
-func (c *conn) URL() string {
- c.RLock()
- defer c.RUnlock()
- return c.url
-}
-
-// IsDead returns true if this connection is marked as dead, i.e. a previous
-// request to the URL has been unsuccessful.
-func (c *conn) IsDead() bool {
- c.RLock()
- defer c.RUnlock()
- return c.dead
-}
-
-// MarkAsDead marks this connection as dead, increments the failures
-// counter and stores the current time in dead since.
-func (c *conn) MarkAsDead() {
- c.Lock()
- c.dead = true
- if c.deadSince == nil {
- utcNow := time.Now().UTC()
- c.deadSince = &utcNow
- }
- c.failures += 1
- c.Unlock()
-}
-
-// MarkAsAlive marks this connection as eligible to be returned from the
-// pool of connections by the selector.
-func (c *conn) MarkAsAlive() {
- c.Lock()
- c.dead = false
- c.Unlock()
-}
-
-// MarkAsHealthy marks this connection as healthy, i.e. a request has been
-// successfully performed with it.
-func (c *conn) MarkAsHealthy() {
- c.Lock()
- c.dead = false
- c.deadSince = nil
- c.failures = 0
- c.Unlock()
-}
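
conn, newConn and the Mark* methods above are unexported, so only code inside package elastic can touch them; the client uses them to track per-node health. An illustrative sketch of that lifecycle, written as if it lived in the same package:

// Illustrative only: this function would have to be part of package elastic.
func exampleConnLifecycle() {
	c := newConn("node-1", "http://10.0.0.1:9200")

	// A failed request marks the connection dead, bumps failures and
	// records the time of the first failure.
	c.MarkAsDead()
	if c.IsDead() {
		// Selectors skip dead connections until a health check revives them.
	}

	// A later successful request clears dead, deadSince and failures.
	c.MarkAsHealthy()
	_ = c.String() // e.g. "http://10.0.0.1:9200 [dead=false,failures=0,deadSince=<nil>]"
}
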
diff --git a/vendor/github.com/olivere/elastic/count.go b/vendor/github.com/olivere/elastic/count.go
deleted file mode 100644
index 44416fab0..000000000
--- a/vendor/github.com/olivere/elastic/count.go
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// CountService is a convenient service for determining the
-// number of documents in an index. Use SearchService with
-// a SearchType of count for counting with queries etc.
-type CountService struct {
- client *Client
- pretty bool
- index []string
- typ []string
- allowNoIndices *bool
- analyzeWildcard *bool
- analyzer string
- defaultOperator string
- df string
- expandWildcards string
- ignoreUnavailable *bool
- lenient *bool
- lowercaseExpandedTerms *bool
- minScore interface{}
- preference string
- q string
- query Query
- routing string
- bodyJson interface{}
- bodyString string
-}
-
-// NewCountService creates a new CountService.
-func NewCountService(client *Client) *CountService {
- return &CountService{
- client: client,
- }
-}
-
-// Index sets the names of the indices to restrict the results.
-func (s *CountService) Index(index ...string) *CountService {
- if s.index == nil {
- s.index = make([]string, 0)
- }
- s.index = append(s.index, index...)
- return s
-}
-
-// Type sets the types to use to restrict the results.
-func (s *CountService) Type(typ ...string) *CountService {
- if s.typ == nil {
- s.typ = make([]string, 0)
- }
- s.typ = append(s.typ, typ...)
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices. (This includes the "_all" string
-// or when no indices have been specified).
-func (s *CountService) AllowNoIndices(allowNoIndices bool) *CountService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// AnalyzeWildcard specifies whether wildcard and prefix queries should be
-// analyzed (default: false).
-func (s *CountService) AnalyzeWildcard(analyzeWildcard bool) *CountService {
- s.analyzeWildcard = &analyzeWildcard
- return s
-}
-
-// Analyzer specifies the analyzer to use for the query string.
-func (s *CountService) Analyzer(analyzer string) *CountService {
- s.analyzer = analyzer
- return s
-}
-
-// DefaultOperator specifies the default operator for query string query (AND or OR).
-func (s *CountService) DefaultOperator(defaultOperator string) *CountService {
- s.defaultOperator = defaultOperator
- return s
-}
-
-// Df specifies the field to use as default where no field prefix is given
-// in the query string.
-func (s *CountService) Df(df string) *CountService {
- s.df = df
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *CountService) ExpandWildcards(expandWildcards string) *CountService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// Lenient specifies whether format-based query failures (such as
-// providing text to a numeric field) should be ignored.
-func (s *CountService) Lenient(lenient bool) *CountService {
- s.lenient = &lenient
- return s
-}
-
-// LowercaseExpandedTerms specifies whether query terms should be lowercased.
-func (s *CountService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *CountService {
- s.lowercaseExpandedTerms = &lowercaseExpandedTerms
- return s
-}
-
-// MinScore indicates to include only documents with a specific `_score`
-// value in the result.
-func (s *CountService) MinScore(minScore interface{}) *CountService {
- s.minScore = minScore
- return s
-}
-
-// Preference specifies the node or shard the operation should be
-// performed on (default: random).
-func (s *CountService) Preference(preference string) *CountService {
- s.preference = preference
- return s
-}
-
-// Q specifies the query in the Lucene query string syntax. You can also
-// use Query to pass a Query struct.
-func (s *CountService) Q(q string) *CountService {
- s.q = q
- return s
-}
-
-// Query specifies the query to pass. You can also pass a query string with Q.
-func (s *CountService) Query(query Query) *CountService {
- s.query = query
- return s
-}
-
-// Routing specifies the routing value.
-func (s *CountService) Routing(routing string) *CountService {
- s.routing = routing
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *CountService) Pretty(pretty bool) *CountService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson specifies the query to restrict the results specified with the
-// Query DSL (optional). The interface{} will be serialized to a JSON document,
-// so use a map[string]interface{}.
-func (s *CountService) BodyJson(body interface{}) *CountService {
- s.bodyJson = body
- return s
-}
-
-// BodyString specifies a query to restrict the results specified with
-// the Query DSL (optional).
-func (s *CountService) BodyString(body string) *CountService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *CountService) buildURL() (string, url.Values, error) {
- var err error
- var path string
-
- if len(s.index) > 0 && len(s.typ) > 0 {
- path, err = uritemplates.Expand("/{index}/{type}/_count", map[string]string{
- "index": strings.Join(s.index, ","),
- "type": strings.Join(s.typ, ","),
- })
- } else if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_count", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- } else if len(s.typ) > 0 {
- path, err = uritemplates.Expand("/_all/{type}/_count", map[string]string{
- "type": strings.Join(s.typ, ","),
- })
- } else {
- path = "/_all/_count"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.analyzeWildcard != nil {
- params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
- }
- if s.analyzer != "" {
- params.Set("analyzer", s.analyzer)
- }
- if s.defaultOperator != "" {
- params.Set("default_operator", s.defaultOperator)
- }
- if s.df != "" {
- params.Set("df", s.df)
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.lenient != nil {
- params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
- }
- if s.lowercaseExpandedTerms != nil {
- params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
- }
- if s.minScore != nil {
- params.Set("min_score", fmt.Sprintf("%v", s.minScore))
- }
- if s.preference != "" {
- params.Set("preference", s.preference)
- }
- if s.q != "" {
- params.Set("q", s.q)
- }
- if s.routing != "" {
- params.Set("routing", s.routing)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *CountService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *CountService) Do(ctx context.Context) (int64, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return 0, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return 0, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.query != nil {
- src, err := s.query.Source()
- if err != nil {
- return 0, err
- }
- query := make(map[string]interface{})
- query["query"] = src
- body = query
- } else if s.bodyJson != nil {
- body = s.bodyJson
- } else if s.bodyString != "" {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return 0, err
- }
-
- // Return result
- ret := new(CountResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return 0, err
- }
- if ret != nil {
- return ret.Count, nil
- }
-
- return int64(0), nil
-}
-
-// CountResponse is the response of using the Count API.
-type CountResponse struct {
- Count int64 `json:"count"`
- Shards shardsInfo `json:"_shards,omitempty"`
-}
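
The removed CountService is usually reached through the client's Count helper, as the test below does. A minimal sketch (same NewClient/SetURL/SetSniff assumptions; the index name "twitter" is hypothetical):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"), elastic.SetSniff(false))
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Count every document in the index: POST /twitter/_count with no body.
	total, err := client.Count("twitter").Do(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Count only matching documents; Query (a Query struct) and Q (a query
	// string) are the two ways to restrict the count.
	matching, err := client.Count("twitter").
		Query(elastic.NewTermQuery("user", "olivere")).
		Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d documents, %d by olivere\n", total, matching)
}
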
diff --git a/vendor/github.com/olivere/elastic/count_test.go b/vendor/github.com/olivere/elastic/count_test.go
deleted file mode 100644
index a0ee52112..000000000
--- a/vendor/github.com/olivere/elastic/count_test.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestCountURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Indices []string
- Types []string
- Expected string
- }{
- {
- []string{},
- []string{},
- "/_all/_count",
- },
- {
- []string{},
- []string{"tweet"},
- "/_all/tweet/_count",
- },
- {
- []string{"twitter-*"},
- []string{"tweet", "follower"},
- "/twitter-%2A/tweet%2Cfollower/_count",
- },
- {
- []string{"twitter-2014", "twitter-2015"},
- []string{"tweet", "follower"},
- "/twitter-2014%2Ctwitter-2015/tweet%2Cfollower/_count",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.Count().Index(test.Indices...).Type(test.Types...).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
-
-func TestCount(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Count documents
- count, err := client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != 3 {
- t.Errorf("expected Count = %d; got %d", 3, count)
- }
-
- // Count documents
- count, err = client.Count(testIndexName).Type("doc").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != 3 {
- t.Errorf("expected Count = %d; got %d", 3, count)
- }
-
- // Count documents
- count, err = client.Count(testIndexName).Type("gezwitscher").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != 0 {
- t.Errorf("expected Count = %d; got %d", 0, count)
- }
-
- // Count with query
- query := NewTermQuery("user", "olivere")
- count, err = client.Count(testIndexName).Query(query).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != 2 {
- t.Errorf("expected Count = %d; got %d", 2, count)
- }
-
- // Count with query and type
- query = NewTermQuery("user", "olivere")
- count, err = client.Count(testIndexName).Type("doc").Query(query).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != 2 {
- t.Errorf("expected Count = %d; got %d", 2, count)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/decoder.go b/vendor/github.com/olivere/elastic/decoder.go
deleted file mode 100644
index 9cd2cf720..000000000
--- a/vendor/github.com/olivere/elastic/decoder.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
-)
-
-// Decoder is used to decode responses from Elasticsearch.
-// Users of elastic can implement their own decoder for advanced purposes
-// and set it per Client (see SetDecoder). If none is specified,
-// DefaultDecoder is used.
-type Decoder interface {
- Decode(data []byte, v interface{}) error
-}
-
-// DefaultDecoder uses json.Unmarshal from the Go standard library
-// to decode JSON data.
-type DefaultDecoder struct{}
-
-// Decode decodes with json.Unmarshal from the Go standard library.
-func (u *DefaultDecoder) Decode(data []byte, v interface{}) error {
- return json.Unmarshal(data, v)
-}
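
The Decoder interface removed above has a single method, so a drop-in replacement is small. A sketch of a custom decoder wired in with SetDecoder (the same option the deleted decoder_test.go uses); the UseNumber behaviour is just an example:

package main

import (
	"bytes"
	"encoding/json"
	"log"

	"github.com/olivere/elastic"
)

// numberDecoder decodes JSON numbers as json.Number instead of float64,
// so large integer values survive the round trip.
type numberDecoder struct{}

func (numberDecoder) Decode(data []byte, v interface{}) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.UseNumber()
	return dec.Decode(v)
}

func main() {
	// SetDecoder replaces the DefaultDecoder for every response this client decodes.
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"),
		elastic.SetSniff(false),
		elastic.SetDecoder(numberDecoder{}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
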
diff --git a/vendor/github.com/olivere/elastic/decoder_test.go b/vendor/github.com/olivere/elastic/decoder_test.go
deleted file mode 100644
index 2c3dde8ca..000000000
--- a/vendor/github.com/olivere/elastic/decoder_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "sync/atomic"
- "testing"
-)
-
-type decoder struct {
- dec json.Decoder
-
- N int64
-}
-
-func (d *decoder) Decode(data []byte, v interface{}) error {
- atomic.AddInt64(&d.N, 1)
- dec := json.NewDecoder(bytes.NewReader(data))
- dec.UseNumber()
- return dec.Decode(v)
-}
-
-func TestDecoder(t *testing.T) {
- dec := &decoder{}
- client := setupTestClientAndCreateIndex(t, SetDecoder(dec), SetMaxRetries(0))
-
- tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
-
- // Add a document
- indexResult, err := client.Index().
- Index(testIndexName).
- Type("doc").
- Id("1").
- BodyJson(&tweet).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if indexResult == nil {
- t.Errorf("expected result to be != nil; got: %v", indexResult)
- }
- if dec.N == 0 {
- t.Errorf("expected at least 1 call of decoder; got: %d", dec.N)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/delete.go b/vendor/github.com/olivere/elastic/delete.go
deleted file mode 100644
index 1e20de11f..000000000
--- a/vendor/github.com/olivere/elastic/delete.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// DeleteService deletes a typed JSON document from a specified
-// index based on its id.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-delete.html
-// for details.
-type DeleteService struct {
- client *Client
- pretty bool
- id string
- index string
- typ string
- routing string
- timeout string
- version interface{}
- versionType string
- waitForActiveShards string
- parent string
- refresh string
-}
-
-// NewDeleteService creates a new DeleteService.
-func NewDeleteService(client *Client) *DeleteService {
- return &DeleteService{
- client: client,
- }
-}
-
-// Type is the type of the document.
-func (s *DeleteService) Type(typ string) *DeleteService {
- s.typ = typ
- return s
-}
-
-// Id is the document ID.
-func (s *DeleteService) Id(id string) *DeleteService {
- s.id = id
- return s
-}
-
-// Index is the name of the index.
-func (s *DeleteService) Index(index string) *DeleteService {
- s.index = index
- return s
-}
-
-// Routing is a specific routing value.
-func (s *DeleteService) Routing(routing string) *DeleteService {
- s.routing = routing
- return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *DeleteService) Timeout(timeout string) *DeleteService {
- s.timeout = timeout
- return s
-}
-
-// Version is an explicit version number for concurrency control.
-func (s *DeleteService) Version(version interface{}) *DeleteService {
- s.version = version
- return s
-}
-
-// VersionType is a specific version type.
-func (s *DeleteService) VersionType(versionType string) *DeleteService {
- s.versionType = versionType
- return s
-}
-
-// WaitForActiveShards sets the number of shard copies that must be active
-// before proceeding with the delete operation. Defaults to 1, meaning the
-// primary shard only. Set to `all` for all shard copies, otherwise set to
-// any non-negative value less than or equal to the total number of copies
-// for the shard (number of replicas + 1).
-func (s *DeleteService) WaitForActiveShards(waitForActiveShards string) *DeleteService {
- s.waitForActiveShards = waitForActiveShards
- return s
-}
-
-// Parent is the ID of parent document.
-func (s *DeleteService) Parent(parent string) *DeleteService {
- s.parent = parent
- return s
-}
-
-// Refresh the index after performing the operation.
-func (s *DeleteService) Refresh(refresh string) *DeleteService {
- s.refresh = refresh
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *DeleteService) Pretty(pretty bool) *DeleteService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *DeleteService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
- "index": s.index,
- "type": s.typ,
- "id": s.id,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.refresh != "" {
- params.Set("refresh", s.refresh)
- }
- if s.routing != "" {
- params.Set("routing", s.routing)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.version != nil {
- params.Set("version", fmt.Sprintf("%v", s.version))
- }
- if s.versionType != "" {
- params.Set("version_type", s.versionType)
- }
- if s.waitForActiveShards != "" {
- params.Set("wait_for_active_shards", s.waitForActiveShards)
- }
- if s.parent != "" {
- params.Set("parent", s.parent)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *DeleteService) Validate() error {
- var invalid []string
- if s.typ == "" {
- invalid = append(invalid, "Type")
- }
- if s.id == "" {
- invalid = append(invalid, "Id")
- }
- if s.index == "" {
- invalid = append(invalid, "Index")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation. If the document is not found (404), Elasticsearch will
-// still return a response. This response is serialized and returned as well. In other
-// words, for HTTP status code 404, both an error and a response might be returned.
-func (s *DeleteService) Do(ctx context.Context) (*DeleteResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "DELETE",
- Path: path,
- Params: params,
- IgnoreErrors: []int{http.StatusNotFound},
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(DeleteResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
-
- // If we have a 404, we return both a result and an error, just like ES does
- if res.StatusCode == http.StatusNotFound {
- return ret, &Error{Status: http.StatusNotFound}
- }
-
- return ret, nil
-}
-
-// -- Result of a delete request.
-
-// DeleteResponse is the outcome of running DeleteService.Do.
-type DeleteResponse struct {
- Index string `json:"_index,omitempty"`
- Type string `json:"_type,omitempty"`
- Id string `json:"_id,omitempty"`
- Version int64 `json:"_version,omitempty"`
- Result string `json:"result,omitempty"`
- Shards *shardsInfo `json:"_shards,omitempty"`
- SeqNo int64 `json:"_seq_no,omitempty"`
- PrimaryTerm int64 `json:"_primary_term,omitempty"`
- Status int `json:"status,omitempty"`
- ForcedRefresh bool `json:"forced_refresh,omitempty"`
-}
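
A minimal sketch of the removed DeleteService, including the 404 behaviour spelled out in its Do documentation; the client's Delete helper, NewClient/SetURL/SetSniff and the exported Error type are assumed from elsewhere in the package, and the index, type and id values are hypothetical:

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"), elastic.SetSniff(false))
	if err != nil {
		log.Fatal(err)
	}

	// Index, Type and Id are exactly the fields Validate requires.
	res, err := client.Delete().
		Index("twitter").
		Type("doc").
		Id("1").
		Do(context.Background())
	if err != nil {
		// For a 404, Do returns both a decoded response and an *Error with Status 404.
		if e, ok := err.(*elastic.Error); ok && e.Status == http.StatusNotFound {
			fmt.Println("document was already gone")
			return
		}
		log.Fatal(err)
	}
	fmt.Println("deleted, result:", res.Result)
}
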
diff --git a/vendor/github.com/olivere/elastic/delete_by_query.go b/vendor/github.com/olivere/elastic/delete_by_query.go
deleted file mode 100644
index 694d81c2a..000000000
--- a/vendor/github.com/olivere/elastic/delete_by_query.go
+++ /dev/null
@@ -1,654 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// DeleteByQueryService deletes documents that match a query.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-delete-by-query.html.
-type DeleteByQueryService struct {
- client *Client
- index []string
- typ []string
- query Query
- body interface{}
- xSource []string
- xSourceExclude []string
- xSourceInclude []string
- analyzer string
- analyzeWildcard *bool
- allowNoIndices *bool
- conflicts string
- defaultOperator string
- df string
- docvalueFields []string
- expandWildcards string
- explain *bool
- from *int
- ignoreUnavailable *bool
- lenient *bool
- lowercaseExpandedTerms *bool
- preference string
- q string
- refresh string
- requestCache *bool
- requestsPerSecond *int
- routing []string
- scroll string
- scrollSize *int
- searchTimeout string
- searchType string
- size *int
- sort []string
- stats []string
- storedFields []string
- suggestField string
- suggestMode string
- suggestSize *int
- suggestText string
- terminateAfter *int
- timeout string
- trackScores *bool
- version *bool
- waitForActiveShards string
- waitForCompletion *bool
- pretty bool
-}
-
-// NewDeleteByQueryService creates a new DeleteByQueryService.
-// You typically use the client's DeleteByQuery to get a reference to
-// the service.
-func NewDeleteByQueryService(client *Client) *DeleteByQueryService {
- builder := &DeleteByQueryService{
- client: client,
- }
- return builder
-}
-
-// Index sets the indices on which to perform the delete operation.
-func (s *DeleteByQueryService) Index(index ...string) *DeleteByQueryService {
- s.index = append(s.index, index...)
- return s
-}
-
-// Type limits the delete operation to the given types.
-func (s *DeleteByQueryService) Type(typ ...string) *DeleteByQueryService {
- s.typ = append(s.typ, typ...)
- return s
-}
-
-// XSource is true or false to return the _source field or not,
-// or a list of fields to return.
-func (s *DeleteByQueryService) XSource(xSource ...string) *DeleteByQueryService {
- s.xSource = append(s.xSource, xSource...)
- return s
-}
-
-// XSourceExclude represents a list of fields to exclude from the returned _source field.
-func (s *DeleteByQueryService) XSourceExclude(xSourceExclude ...string) *DeleteByQueryService {
- s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
- return s
-}
-
-// XSourceInclude represents a list of fields to extract and return from the _source field.
-func (s *DeleteByQueryService) XSourceInclude(xSourceInclude ...string) *DeleteByQueryService {
- s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
- return s
-}
-
-// Analyzer to use for the query string.
-func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService {
- s.analyzer = analyzer
- return s
-}
-
-// AnalyzeWildcard specifies whether wildcard and prefix queries should be
-// analyzed (default: false).
-func (s *DeleteByQueryService) AnalyzeWildcard(analyzeWildcard bool) *DeleteByQueryService {
- s.analyzeWildcard = &analyzeWildcard
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices (including the _all string
-// or when no indices have been specified).
-func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService {
- s.allowNoIndices = &allow
- return s
-}
-
-// Conflicts indicates what to do when the process detects version conflicts.
-// Possible values are "proceed" and "abort".
-func (s *DeleteByQueryService) Conflicts(conflicts string) *DeleteByQueryService {
- s.conflicts = conflicts
- return s
-}
-
-// AbortOnVersionConflict aborts the request on version conflicts.
-// It is an alias to setting Conflicts("abort").
-func (s *DeleteByQueryService) AbortOnVersionConflict() *DeleteByQueryService {
- s.conflicts = "abort"
- return s
-}
-
-// ProceedOnVersionConflict proceeds with the request in case of version conflicts.
-// It is an alias to setting Conflicts("proceed").
-func (s *DeleteByQueryService) ProceedOnVersionConflict() *DeleteByQueryService {
- s.conflicts = "proceed"
- return s
-}
-
-// DefaultOperator for query string query (AND or OR).
-func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService {
- s.defaultOperator = defaultOperator
- return s
-}
-
-// DF is the field to use as default where no field prefix is given in the query string.
-func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService {
- s.df = defaultField
- return s
-}
-
-// DefaultField is the field to use as default where no field prefix is given in the query string.
-// It is an alias to the DF func.
-func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService {
- s.df = defaultField
- return s
-}
-
-// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit.
-func (s *DeleteByQueryService) DocvalueFields(docvalueFields ...string) *DeleteByQueryService {
- s.docvalueFields = docvalueFields
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both. It can be "open" or "closed".
-func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService {
- s.expandWildcards = expand
- return s
-}
-
-// Explain specifies whether to return detailed information about score
-// computation as part of a hit.
-func (s *DeleteByQueryService) Explain(explain bool) *DeleteByQueryService {
- s.explain = &explain
- return s
-}
-
-// From is the starting offset (default: 0).
-func (s *DeleteByQueryService) From(from int) *DeleteByQueryService {
- s.from = &from
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService {
- s.ignoreUnavailable = &ignore
- return s
-}
-
-// Lenient specifies whether format-based query failures
-// (such as providing text to a numeric field) should be ignored.
-func (s *DeleteByQueryService) Lenient(lenient bool) *DeleteByQueryService {
- s.lenient = &lenient
- return s
-}
-
-// LowercaseExpandedTerms specifies whether query terms should be lowercased.
-func (s *DeleteByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *DeleteByQueryService {
- s.lowercaseExpandedTerms = &lowercaseExpandedTerms
- return s
-}
-
-// Preference specifies the node or shard the operation should be performed on
-// (default: random).
-func (s *DeleteByQueryService) Preference(preference string) *DeleteByQueryService {
- s.preference = preference
- return s
-}
-
-// Q specifies the query in Lucene query string syntax. You can also use
-// Query to programmatically specify the query.
-func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService {
- s.q = query
- return s
-}
-
-// QueryString is an alias to Q. Notice that you can also use Query to
-// programmatically set the query.
-func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService {
- s.q = query
- return s
-}
-
-// Query sets the query programmatically.
-func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService {
- s.query = query
- return s
-}
-
-// Refresh indicates whether the affected indexes should be refreshed.
-func (s *DeleteByQueryService) Refresh(refresh string) *DeleteByQueryService {
- s.refresh = refresh
- return s
-}
-
-// RequestCache specifies if request cache should be used for this request
-// or not, defaults to index level setting.
-func (s *DeleteByQueryService) RequestCache(requestCache bool) *DeleteByQueryService {
- s.requestCache = &requestCache
- return s
-}
-
-// RequestsPerSecond sets the throttle on this request in sub-requests per second.
-// -1 means set no throttle as does "unlimited" which is the only non-float this accepts.
-func (s *DeleteByQueryService) RequestsPerSecond(requestsPerSecond int) *DeleteByQueryService {
- s.requestsPerSecond = &requestsPerSecond
- return s
-}
-
-// Routing is a list of specific routing values.
-func (s *DeleteByQueryService) Routing(routing ...string) *DeleteByQueryService {
- s.routing = append(s.routing, routing...)
- return s
-}
-
-// Scroll specifies how long a consistent view of the index should be maintained
-// for scrolled search.
-func (s *DeleteByQueryService) Scroll(scroll string) *DeleteByQueryService {
- s.scroll = scroll
- return s
-}
-
-// ScrollSize is the size of the scroll request powering the delete_by_query.
-func (s *DeleteByQueryService) ScrollSize(scrollSize int) *DeleteByQueryService {
- s.scrollSize = &scrollSize
- return s
-}
-
-// SearchTimeout defines an explicit timeout for each search request.
-// Defaults to no timeout.
-func (s *DeleteByQueryService) SearchTimeout(searchTimeout string) *DeleteByQueryService {
- s.searchTimeout = searchTimeout
- return s
-}
-
-// SearchType is the search operation type. Possible values are
-// "query_then_fetch" and "dfs_query_then_fetch".
-func (s *DeleteByQueryService) SearchType(searchType string) *DeleteByQueryService {
- s.searchType = searchType
- return s
-}
-
-// Size represents the number of hits to return (default: 10).
-func (s *DeleteByQueryService) Size(size int) *DeleteByQueryService {
- s.size = &size
- return s
-}
-
-// Sort is a list of <field>:<direction> pairs.
-func (s *DeleteByQueryService) Sort(sort ...string) *DeleteByQueryService {
- s.sort = append(s.sort, sort...)
- return s
-}
-
-// SortByField adds a sort order.
-func (s *DeleteByQueryService) SortByField(field string, ascending bool) *DeleteByQueryService {
- if ascending {
- s.sort = append(s.sort, fmt.Sprintf("%s:asc", field))
- } else {
- s.sort = append(s.sort, fmt.Sprintf("%s:desc", field))
- }
- return s
-}
-
-// Stats specifies specific tag(s) of the request for logging and statistical purposes.
-func (s *DeleteByQueryService) Stats(stats ...string) *DeleteByQueryService {
- s.stats = append(s.stats, stats...)
- return s
-}
-
-// StoredFields specifies the list of stored fields to return as part of a hit.
-func (s *DeleteByQueryService) StoredFields(storedFields ...string) *DeleteByQueryService {
- s.storedFields = storedFields
- return s
-}
-
-// SuggestField specifies which field to use for suggestions.
-func (s *DeleteByQueryService) SuggestField(suggestField string) *DeleteByQueryService {
- s.suggestField = suggestField
- return s
-}
-
-// SuggestMode specifies the suggest mode. Possible values are
-// "missing", "popular", and "always".
-func (s *DeleteByQueryService) SuggestMode(suggestMode string) *DeleteByQueryService {
- s.suggestMode = suggestMode
- return s
-}
-
-// SuggestSize specifies how many suggestions to return in response.
-func (s *DeleteByQueryService) SuggestSize(suggestSize int) *DeleteByQueryService {
- s.suggestSize = &suggestSize
- return s
-}
-
-// SuggestText specifies the source text for which the suggestions should be returned.
-func (s *DeleteByQueryService) SuggestText(suggestText string) *DeleteByQueryService {
- s.suggestText = suggestText
- return s
-}
-
-// TerminateAfter indicates the maximum number of documents to collect
-// for each shard, upon reaching which the query execution will terminate early.
-func (s *DeleteByQueryService) TerminateAfter(terminateAfter int) *DeleteByQueryService {
- s.terminateAfter = &terminateAfter
- return s
-}
-
-// Timeout is the time each individual bulk request should wait for shards
-// that are unavailable.
-func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService {
- s.timeout = timeout
- return s
-}
-
-// TimeoutInMillis sets the timeout in milliseconds.
-func (s *DeleteByQueryService) TimeoutInMillis(timeoutInMillis int) *DeleteByQueryService {
- s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
- return s
-}
-
-// TrackScores indicates whether to calculate and return scores even if
-// they are not used for sorting.
-func (s *DeleteByQueryService) TrackScores(trackScores bool) *DeleteByQueryService {
- s.trackScores = &trackScores
- return s
-}
-
-// Version specifies whether to return document version as part of a hit.
-func (s *DeleteByQueryService) Version(version bool) *DeleteByQueryService {
- s.version = &version
- return s
-}
-
-// WaitForActiveShards sets the number of shard copies that must be active before proceeding
-// with the delete by query operation. Defaults to 1, meaning the primary shard only.
-// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal
-// to the total number of copies for the shard (number of replicas + 1).
-func (s *DeleteByQueryService) WaitForActiveShards(waitForActiveShards string) *DeleteByQueryService {
- s.waitForActiveShards = waitForActiveShards
- return s
-}
-
-// WaitForCompletion indicates if the request should block until the delete-by-query is complete.
-func (s *DeleteByQueryService) WaitForCompletion(waitForCompletion bool) *DeleteByQueryService {
- s.waitForCompletion = &waitForCompletion
- return s
-}
-
-// Pretty indents the JSON output from Elasticsearch.
-func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService {
- s.pretty = pretty
- return s
-}
-
-// Body specifies the body of the request. It takes precedence over a query set via Query.
-func (s *DeleteByQueryService) Body(body string) *DeleteByQueryService {
- s.body = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *DeleteByQueryService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
- if len(s.typ) > 0 {
- path, err = uritemplates.Expand("/{index}/{type}/_delete_by_query", map[string]string{
- "index": strings.Join(s.index, ","),
- "type": strings.Join(s.typ, ","),
- })
- } else {
- path, err = uritemplates.Expand("/{index}/_delete_by_query", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if len(s.xSource) > 0 {
- params.Set("_source", strings.Join(s.xSource, ","))
- }
- if len(s.xSourceExclude) > 0 {
- params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
- }
- if len(s.xSourceInclude) > 0 {
- params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
- }
- if s.analyzer != "" {
- params.Set("analyzer", s.analyzer)
- }
- if s.analyzeWildcard != nil {
- params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
- }
- if s.defaultOperator != "" {
- params.Set("default_operator", s.defaultOperator)
- }
- if s.df != "" {
- params.Set("df", s.df)
- }
- if s.explain != nil {
- params.Set("explain", fmt.Sprintf("%v", *s.explain))
- }
- if len(s.storedFields) > 0 {
- params.Set("stored_fields", strings.Join(s.storedFields, ","))
- }
- if len(s.docvalueFields) > 0 {
- params.Set("docvalue_fields", strings.Join(s.docvalueFields, ","))
- }
- if s.from != nil {
- params.Set("from", fmt.Sprintf("%d", *s.from))
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.conflicts != "" {
- params.Set("conflicts", s.conflicts)
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.lenient != nil {
- params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
- }
- if s.lowercaseExpandedTerms != nil {
- params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
- }
- if s.preference != "" {
- params.Set("preference", s.preference)
- }
- if s.q != "" {
- params.Set("q", s.q)
- }
- if len(s.routing) > 0 {
- params.Set("routing", strings.Join(s.routing, ","))
- }
- if s.scroll != "" {
- params.Set("scroll", s.scroll)
- }
- if s.searchType != "" {
- params.Set("search_type", s.searchType)
- }
- if s.searchTimeout != "" {
- params.Set("search_timeout", s.searchTimeout)
- }
- if s.size != nil {
- params.Set("size", fmt.Sprintf("%d", *s.size))
- }
- if len(s.sort) > 0 {
- params.Set("sort", strings.Join(s.sort, ","))
- }
- if s.terminateAfter != nil {
- params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter))
- }
- if len(s.stats) > 0 {
- params.Set("stats", strings.Join(s.stats, ","))
- }
- if s.suggestField != "" {
- params.Set("suggest_field", s.suggestField)
- }
- if s.suggestMode != "" {
- params.Set("suggest_mode", s.suggestMode)
- }
- if s.suggestSize != nil {
- params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize))
- }
- if s.suggestText != "" {
- params.Set("suggest_text", s.suggestText)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.trackScores != nil {
- params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores))
- }
- if s.version != nil {
- params.Set("version", fmt.Sprintf("%v", *s.version))
- }
- if s.requestCache != nil {
- params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache))
- }
- if s.refresh != "" {
- params.Set("refresh", s.refresh)
- }
- if s.waitForActiveShards != "" {
- params.Set("wait_for_active_shards", s.waitForActiveShards)
- }
- if s.scrollSize != nil {
- params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize))
- }
- if s.waitForCompletion != nil {
- params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
- }
- if s.requestsPerSecond != nil {
- params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond))
- }
- if s.pretty {
- params.Set("pretty", fmt.Sprintf("%v", s.pretty))
- }
- return path, params, nil
-}
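As a quick illustration of what the method above produces, here is a hedged, package-internal sketch (not part of the vendored sources; the index name, routing values, and helper name are invented) that prints the generated path and encoded query string:

func debugDeleteByQueryURL(client *Client) {
    // Illustrative sketch only: inspect the URL DeleteByQueryService would
    // request, without executing anything against Elasticsearch.
    path, params, err := client.DeleteByQuery().
        Index("tweets").
        Routing("u1", "u2").
        Size(100).
        buildURL()
    if err != nil {
        return
    }
    fmt.Println(path)            // /tweets/_delete_by_query
    fmt.Println(params.Encode()) // routing=u1%2Cu2&size=100
}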
-
-// Validate checks if the operation is valid.
-func (s *DeleteByQueryService) Validate() error {
- var invalid []string
- if len(s.index) == 0 {
- invalid = append(invalid, "Index")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the delete-by-query operation.
-func (s *DeleteByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Set body if there is a query set
- var body interface{}
- if s.body != nil {
- body = s.body
- } else if s.query != nil {
- src, err := s.query.Source()
- if err != nil {
- return nil, err
- }
- body = map[string]interface{}{
- "query": src,
- }
- }
-
- // Get response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return result
- ret := new(BulkIndexByScrollResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// BulkIndexByScrollResponse is the outcome of executing Do with
-// DeleteByQueryService and UpdateByQueryService.
-type BulkIndexByScrollResponse struct {
- Took int64 `json:"took"`
- SliceId *int64 `json:"slice_id,omitempty"`
- TimedOut bool `json:"timed_out"`
- Total int64 `json:"total"`
- Updated int64 `json:"updated,omitempty"`
- Created int64 `json:"created,omitempty"`
- Deleted int64 `json:"deleted"`
- Batches int64 `json:"batches"`
- VersionConflicts int64 `json:"version_conflicts"`
- Noops int64 `json:"noops"`
- Retries struct {
- Bulk int64 `json:"bulk"`
- Search int64 `json:"search"`
- } `json:"retries,omitempty"`
- Throttled string `json:"throttled"`
- ThrottledMillis int64 `json:"throttled_millis"`
- RequestsPerSecond float64 `json:"requests_per_second"`
- Canceled string `json:"canceled,omitempty"`
- ThrottledUntil string `json:"throttled_until"`
- ThrottledUntilMillis int64 `json:"throttled_until_millis"`
- Failures []bulkIndexByScrollResponseFailure `json:"failures"`
-}
-
-type bulkIndexByScrollResponseFailure struct {
- Index string `json:"index,omitempty"`
- Type string `json:"type,omitempty"`
- Id string `json:"id,omitempty"`
- Status int `json:"status,omitempty"`
- Shard int `json:"shard,omitempty"`
- Node int `json:"node,omitempty"`
- // TOOD "cause" contains exception details
- // TOOD "reason" contains exception details
-}
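For context, a minimal, hedged sketch of how DeleteByQueryService and BulkIndexByScrollResponse are typically used from application code; the "tweets" index, "doc" type, and "user" field are assumptions for illustration, and only methods that appear in this file or its test are used:

package main

import (
    "context"
    "fmt"
    "log"

    elastic "github.com/olivere/elastic"
)

func main() {
    // Connect to a local Elasticsearch node (assumed to run on :9200).
    client, err := elastic.NewClient()
    if err != nil {
        log.Fatal(err)
    }

    // Delete every document in "tweets" whose "user" field is "sandrae",
    // blocking until the scroll-driven bulk deletes have finished.
    res, err := client.DeleteByQuery().
        Index("tweets").
        Type("doc").
        Query(elastic.NewTermQuery("user", "sandrae")).
        WaitForCompletion(true).
        Do(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("deleted=%d conflicts=%d batches=%d failures=%d\n",
        res.Deleted, res.VersionConflicts, res.Batches, len(res.Failures))
}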
diff --git a/vendor/github.com/olivere/elastic/delete_by_query_test.go b/vendor/github.com/olivere/elastic/delete_by_query_test.go
deleted file mode 100644
index 40e45b871..000000000
--- a/vendor/github.com/olivere/elastic/delete_by_query_test.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestDeleteByQueryBuildURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Indices []string
- Types []string
- Expected string
- ExpectErr bool
- }{
- {
- []string{},
- []string{},
- "",
- true,
- },
- {
- []string{"index1"},
- []string{},
- "/index1/_delete_by_query",
- false,
- },
- {
- []string{"index1", "index2"},
- []string{},
- "/index1%2Cindex2/_delete_by_query",
- false,
- },
- {
- []string{},
- []string{"type1"},
- "",
- true,
- },
- {
- []string{"index1"},
- []string{"type1"},
- "/index1/type1/_delete_by_query",
- false,
- },
- {
- []string{"index1", "index2"},
- []string{"type1", "type2"},
- "/index1%2Cindex2/type1%2Ctype2/_delete_by_query",
- false,
- },
- }
-
- for i, test := range tests {
- builder := client.DeleteByQuery().Index(test.Indices...).Type(test.Types...)
- err := builder.Validate()
- if err != nil {
- if !test.ExpectErr {
- t.Errorf("case #%d: %v", i+1, err)
- continue
- }
- } else {
- // err == nil
- if test.ExpectErr {
- t.Errorf("case #%d: expected error", i+1)
- continue
- }
- path, _, _ := builder.buildURL()
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
- }
-}
-
-func TestDeleteByQuery(t *testing.T) {
- // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Count documents
- count, err := client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != 3 {
- t.Fatalf("expected count = %d; got: %d", 3, count)
- }
-
- // Delete all documents by sandrae
- q := NewTermQuery("user", "sandrae")
- res, err := client.DeleteByQuery().
- Index(testIndexName).
- Type("doc").
- Query(q).
- Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatalf("expected response != nil; got: %v", res)
- }
-
- // Flush and check count
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- count, err = client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != 2 {
- t.Fatalf("expected Count = %d; got: %d", 2, count)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/delete_test.go b/vendor/github.com/olivere/elastic/delete_test.go
deleted file mode 100644
index 571fcf589..000000000
--- a/vendor/github.com/olivere/elastic/delete_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestDelete(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Count documents
- count, err := client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != 3 {
- t.Errorf("expected Count = %d; got %d", 3, count)
- }
-
- // Delete document 1
- res, err := client.Delete().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if want, have := "deleted", res.Result; want != have {
- t.Errorf("expected Result = %q; got %q", want, have)
- }
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- count, err = client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != 2 {
- t.Errorf("expected Count = %d; got %d", 2, count)
- }
-
- // Delete non existent document 99
- res, err = client.Delete().Index(testIndexName).Type("doc").Id("99").Refresh("true").Do(context.TODO())
- if err == nil {
- t.Fatal("expected error")
- }
- if !IsNotFound(err) {
- t.Fatalf("expected 404, got: %v", err)
- }
- if _, ok := err.(*Error); !ok {
- t.Fatalf("expected error type *Error, got: %T", err)
- }
- if res == nil {
- t.Fatal("expected response")
- }
- if have, want := res.Id, "99"; have != want {
- t.Errorf("expected _id = %q, got %q", have, want)
- }
- if have, want := res.Index, testIndexName; have != want {
- t.Errorf("expected _index = %q, got %q", have, want)
- }
- if have, want := res.Type, "doc"; have != want {
- t.Errorf("expected _type = %q, got %q", have, want)
- }
- if have, want := res.Result, "not_found"; have != want {
- t.Errorf("expected Result = %q, got %q", have, want)
- }
-
- count, err = client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != 2 {
- t.Errorf("expected Count = %d; got %d", 2, count)
- }
-}
-
-func TestDeleteValidate(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // No index name -> fail with error
- res, err := NewDeleteService(client).Type("doc").Id("1").Do(context.TODO())
- if err == nil {
- t.Fatalf("expected Delete to fail without index name")
- }
- if res != nil {
- t.Fatalf("expected result to be == nil; got: %v", res)
- }
-
- // No type -> fail with error
- res, err = NewDeleteService(client).Index(testIndexName).Id("1").Do(context.TODO())
- if err == nil {
- t.Fatalf("expected Delete to fail without type")
- }
- if res != nil {
- t.Fatalf("expected result to be == nil; got: %v", res)
- }
-
- // No id -> fail with error
- res, err = NewDeleteService(client).Index(testIndexName).Type("doc").Do(context.TODO())
- if err == nil {
- t.Fatalf("expected Delete to fail without id")
- }
- if res != nil {
- t.Fatalf("expected result to be == nil; got: %v", res)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/doc.go b/vendor/github.com/olivere/elastic/doc.go
deleted file mode 100644
index ea16d6698..000000000
--- a/vendor/github.com/olivere/elastic/doc.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-/*
-Package elastic provides an interface to the Elasticsearch server
-(https://www.elastic.co/products/elasticsearch).
-
-The first thing you do is to create a Client. If you have Elasticsearch
-installed and running with its default settings
-(i.e. available at http://127.0.0.1:9200), all you need to do is:
-
- client, err := elastic.NewClient()
- if err != nil {
- // Handle error
- }
-
-If your Elasticsearch server is running on a different IP and/or port,
-just provide a URL to NewClient:
-
- // Create a client and connect to http://192.168.2.10:9201
- client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201"))
- if err != nil {
- // Handle error
- }
-
-You can pass many more configuration parameters to NewClient. Review the
-documentation of NewClient for more information.
-
-If no Elasticsearch server is available, services will fail when creating
-a new request and will return ErrNoClient.
-
-A Client provides services. The services usually come with a variety of
-methods to prepare the query and a Do function to execute it against the
-Elasticsearch REST interface and return a response. Here is an example
-of the IndexExists service that checks if a given index already exists.
-
- exists, err := client.IndexExists("twitter").Do(context.Background())
- if err != nil {
- // Handle error
- }
- if !exists {
- // Index does not exist yet.
- }
-
-Look up the documentation for Client to get an idea of the services provided
-and what kinds of responses you get when executing the Do function of a service.
-Also see the wiki on GitHub for more details.
-
-*/
-package elastic
diff --git a/vendor/github.com/olivere/elastic/errors.go b/vendor/github.com/olivere/elastic/errors.go
deleted file mode 100644
index e40cda845..000000000
--- a/vendor/github.com/olivere/elastic/errors.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
-
- "github.com/pkg/errors"
-)
-
-// checkResponse will return an error if the request/response indicates
-// an error returned from Elasticsearch.
-//
-// HTTP status codes in the range [200..299] are considered successful.
-// All other status codes are considered errors unless they are specified in
-// ignoreErrors. This is necessary because for some services, HTTP status 404
-// is a valid response from Elasticsearch (e.g. the Exists service).
-//
-// The func tries to parse error details as returned from Elasticsearch
-// and encapsulates them in type elastic.Error.
-func checkResponse(req *http.Request, res *http.Response, ignoreErrors ...int) error {
- // 200-299 are valid status codes
- if res.StatusCode >= 200 && res.StatusCode <= 299 {
- return nil
- }
- // Ignore certain errors?
- for _, code := range ignoreErrors {
- if code == res.StatusCode {
- return nil
- }
- }
- return createResponseError(res)
-}
-
-// createResponseError creates an Error structure from the HTTP response,
-// its status code and the error information sent by Elasticsearch.
-func createResponseError(res *http.Response) error {
- if res.Body == nil {
- return &Error{Status: res.StatusCode}
- }
- data, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return &Error{Status: res.StatusCode}
- }
- errReply := new(Error)
- err = json.Unmarshal(data, errReply)
- if err != nil {
- return &Error{Status: res.StatusCode}
- }
- if errReply != nil {
- if errReply.Status == 0 {
- errReply.Status = res.StatusCode
- }
- return errReply
- }
- return &Error{Status: res.StatusCode}
-}
-
-// Error encapsulates error details as returned from Elasticsearch.
-type Error struct {
- Status int `json:"status"`
- Details *ErrorDetails `json:"error,omitempty"`
-}
-
-// ErrorDetails encapsulate error details from Elasticsearch.
-// It is used in e.g. elastic.Error and elastic.BulkResponseItem.
-type ErrorDetails struct {
- Type string `json:"type"`
- Reason string `json:"reason"`
- ResourceType string `json:"resource.type,omitempty"`
- ResourceId string `json:"resource.id,omitempty"`
- Index string `json:"index,omitempty"`
- Phase string `json:"phase,omitempty"`
- Grouped bool `json:"grouped,omitempty"`
- CausedBy map[string]interface{} `json:"caused_by,omitempty"`
- RootCause []*ErrorDetails `json:"root_cause,omitempty"`
- FailedShards []map[string]interface{} `json:"failed_shards,omitempty"`
-}
-
-// Error returns a string representation of the error.
-func (e *Error) Error() string {
- if e.Details != nil && e.Details.Reason != "" {
- return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type)
- } else {
- return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status))
- }
-}
-
-// IsConnErr returns true if the error indicates that Elastic could not
-// find an Elasticsearch host to connect to.
-func IsConnErr(err error) bool {
- return err == ErrNoClient || errors.Cause(err) == ErrNoClient
-}
-
-// IsNotFound returns true if the given error indicates that Elasticsearch
-// returned HTTP status 404. The err parameter can be of type *elastic.Error,
-// elastic.Error, *http.Response or int (indicating the HTTP status code).
-func IsNotFound(err interface{}) bool {
- return IsStatusCode(err, http.StatusNotFound)
-}
-
-// IsTimeout returns true if the given error indicates that Elasticsearch
-// returned HTTP status 408. The err parameter can be of type *elastic.Error,
-// elastic.Error, *http.Response or int (indicating the HTTP status code).
-func IsTimeout(err interface{}) bool {
- return IsStatusCode(err, http.StatusRequestTimeout)
-}
-
-// IsConflict returns true if the given error indicates that the Elasticsearch
-// operation resulted in a version conflict. This can occur in operations like
-// `update` or `index` with `op_type=create`. The err parameter can be of
-// type *elastic.Error, elastic.Error, *http.Response or int (indicating the
-// HTTP status code).
-func IsConflict(err interface{}) bool {
- return IsStatusCode(err, http.StatusConflict)
-}
-
-// IsStatusCode returns true if the given error indicates that the Elasticsearch
-// operation returned the specified HTTP status code. The err parameter can be of
-// type *http.Response, *Error, Error, or int (indicating the HTTP status code).
-func IsStatusCode(err interface{}, code int) bool {
- switch e := err.(type) {
- case *http.Response:
- return e.StatusCode == code
- case *Error:
- return e.Status == code
- case Error:
- return e.Status == code
- case int:
- return e == code
- }
- return false
-}
-
-// -- General errors --
-
-// shardsInfo represents information from a shard.
-type shardsInfo struct {
- Total int `json:"total"`
- Successful int `json:"successful"`
- Failed int `json:"failed"`
-}
-
-// shardOperationFailure represents a shard failure.
-type shardOperationFailure struct {
- Shard int `json:"shard"`
- Index string `json:"index"`
- Status string `json:"status"`
- // "reason"
-}
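The helpers above are meant to be inspected by callers rather than by the library itself; a hedged sketch of the usual pattern follows (the helper name is invented, and the surrounding call that produced the error is assumed):

package example

import (
    "log"

    elastic "github.com/olivere/elastic"
)

// classifyError is an illustrative helper, not part of the library: it shows
// how IsNotFound, IsConflict and *elastic.Error are typically inspected after
// a request has failed.
func classifyError(err error) {
    switch {
    case err == nil:
        // Request succeeded.
    case elastic.IsNotFound(err):
        // HTTP 404: index or document does not exist; often non-fatal.
    case elastic.IsConflict(err):
        // HTTP 409: version conflict, e.g. concurrent updates.
    default:
        if e, ok := err.(*elastic.Error); ok && e.Details != nil {
            log.Printf("elastic error: status=%d type=%s reason=%s",
                e.Status, e.Details.Type, e.Details.Reason)
        } else {
            log.Printf("request failed: %v", err)
        }
    }
}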
diff --git a/vendor/github.com/olivere/elastic/errors_test.go b/vendor/github.com/olivere/elastic/errors_test.go
deleted file mode 100644
index 75d3949e5..000000000
--- a/vendor/github.com/olivere/elastic/errors_test.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package elastic
-
-import (
- "bufio"
- "fmt"
- "net/http"
- "strings"
- "testing"
-)
-
-func TestResponseError(t *testing.T) {
- raw := "HTTP/1.1 404 Not Found\r\n" +
- "\r\n" +
- `{"error":{"root_cause":[{"type":"index_missing_exception","reason":"no such index","index":"elastic-test"}],"type":"index_missing_exception","reason":"no such index","index":"elastic-test"},"status":404}` + "\r\n"
- r := bufio.NewReader(strings.NewReader(raw))
-
- req, err := http.NewRequest("GET", "/", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err := http.ReadResponse(r, nil)
- if err != nil {
- t.Fatal(err)
- }
- err = checkResponse(req, resp)
- if err == nil {
- t.Fatalf("expected error; got: %v", err)
- }
-
- // Check for correct error message
- expected := fmt.Sprintf("elastic: Error %d (%s): no such index [type=index_missing_exception]", resp.StatusCode, http.StatusText(resp.StatusCode))
- got := err.Error()
- if got != expected {
- t.Fatalf("expected %q; got: %q", expected, got)
- }
-
- // Check that error is of type *elastic.Error, which contains additional information
- e, ok := err.(*Error)
- if !ok {
- t.Fatal("expected error to be of type *elastic.Error")
- }
- if e.Status != resp.StatusCode {
- t.Fatalf("expected status code %d; got: %d", resp.StatusCode, e.Status)
- }
- if e.Details == nil {
- t.Fatalf("expected error details; got: %v", e.Details)
- }
- if got, want := e.Details.Index, "elastic-test"; got != want {
- t.Fatalf("expected error details index %q; got: %q", want, got)
- }
- if got, want := e.Details.Type, "index_missing_exception"; got != want {
- t.Fatalf("expected error details type %q; got: %q", want, got)
- }
- if got, want := e.Details.Reason, "no such index"; got != want {
- t.Fatalf("expected error details reason %q; got: %q", want, got)
- }
- if got, want := len(e.Details.RootCause), 1; got != want {
- t.Fatalf("expected %d error details root causes; got: %d", want, got)
- }
-
- if got, want := e.Details.RootCause[0].Index, "elastic-test"; got != want {
- t.Fatalf("expected root cause index %q; got: %q", want, got)
- }
- if got, want := e.Details.RootCause[0].Type, "index_missing_exception"; got != want {
- t.Fatalf("expected root cause type %q; got: %q", want, got)
- }
- if got, want := e.Details.RootCause[0].Reason, "no such index"; got != want {
- t.Fatalf("expected root cause reason %q; got: %q", want, got)
- }
-}
-
-func TestResponseErrorHTML(t *testing.T) {
- raw := "HTTP/1.1 413 Request Entity Too Large\r\n" +
- "\r\n" +
- `<html>
-<head><title>413 Request Entity Too Large</title></head>
-<body bgcolor="white">
-<center><h1>413 Request Entity Too Large</h1></center>
-<hr><center>nginx/1.6.2</center>
-</body>
-</html>` + "\r\n"
- r := bufio.NewReader(strings.NewReader(raw))
-
- req, err := http.NewRequest("GET", "/", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err := http.ReadResponse(r, nil)
- if err != nil {
- t.Fatal(err)
- }
- err = checkResponse(req, resp)
- if err == nil {
- t.Fatalf("expected error; got: %v", err)
- }
-
- // Check for correct error message
- expected := fmt.Sprintf("elastic: Error %d (%s)", http.StatusRequestEntityTooLarge, http.StatusText(http.StatusRequestEntityTooLarge))
- got := err.Error()
- if got != expected {
- t.Fatalf("expected %q; got: %q", expected, got)
- }
-}
-
-func TestResponseErrorWithIgnore(t *testing.T) {
- raw := "HTTP/1.1 404 Not Found\r\n" +
- "\r\n" +
- `{"some":"response"}` + "\r\n"
- r := bufio.NewReader(strings.NewReader(raw))
-
- req, err := http.NewRequest("HEAD", "/", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err := http.ReadResponse(r, nil)
- if err != nil {
- t.Fatal(err)
- }
- err = checkResponse(req, resp)
- if err == nil {
- t.Fatalf("expected error; got: %v", err)
- }
- err = checkResponse(req, resp, 404) // ignore 404 errors
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
-}
-
-func TestIsNotFound(t *testing.T) {
- if got, want := IsNotFound(nil), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsNotFound(""), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsNotFound(200), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsNotFound(404), true; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
-
- if got, want := IsNotFound(&Error{Status: 404}), true; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsNotFound(&Error{Status: 200}), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
-
- if got, want := IsNotFound(Error{Status: 404}), true; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsNotFound(Error{Status: 200}), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
-
- if got, want := IsNotFound(&http.Response{StatusCode: 404}), true; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsNotFound(&http.Response{StatusCode: 200}), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
-}
-
-func TestIsTimeout(t *testing.T) {
- if got, want := IsTimeout(nil), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsTimeout(""), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsTimeout(200), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsTimeout(408), true; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
-
- if got, want := IsTimeout(&Error{Status: 408}), true; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsTimeout(&Error{Status: 200}), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
-
- if got, want := IsTimeout(Error{Status: 408}), true; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsTimeout(Error{Status: 200}), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
-
- if got, want := IsTimeout(&http.Response{StatusCode: 408}), true; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsTimeout(&http.Response{StatusCode: 200}), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
-}
-
-func TestIsConflict(t *testing.T) {
- if got, want := IsConflict(nil), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsConflict(""), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsConflict(200), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsConflict(http.StatusConflict), true; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
-
- if got, want := IsConflict(&Error{Status: 409}), true; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsConflict(&Error{Status: 200}), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
-
- if got, want := IsConflict(Error{Status: 409}), true; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsConflict(Error{Status: 200}), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
-
- if got, want := IsConflict(&http.Response{StatusCode: 409}), true; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
- if got, want := IsConflict(&http.Response{StatusCode: 200}), false; got != want {
- t.Errorf("expected %v; got: %v", want, got)
- }
-}
-
-func TestIsStatusCode(t *testing.T) {
- tests := []struct {
- Error interface{}
- Code int
- Want bool
- }{
- // #0
- {
- Error: nil,
- Code: 200,
- Want: false,
- },
- // #1
- {
- Error: "",
- Code: 200,
- Want: false,
- },
- // #2
- {
- Error: http.StatusConflict,
- Code: 409,
- Want: true,
- },
- // #3
- {
- Error: http.StatusConflict,
- Code: http.StatusInternalServerError,
- Want: false,
- },
- // #4
- {
- Error: &Error{Status: http.StatusConflict},
- Code: 409,
- Want: true,
- },
- // #5
- {
- Error: Error{Status: http.StatusConflict},
- Code: 409,
- Want: true,
- },
- // #6
- {
- Error: &http.Response{StatusCode: http.StatusConflict},
- Code: 409,
- Want: true,
- },
- }
-
- for i, tt := range tests {
- if have, want := IsStatusCode(tt.Error, tt.Code), tt.Want; have != want {
- t.Errorf("#%d: have %v, want %v", i, have, want)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/etc/elasticsearch.yml b/vendor/github.com/olivere/elastic/etc/elasticsearch.yml
deleted file mode 100644
index 9923cfe4f..000000000
--- a/vendor/github.com/olivere/elastic/etc/elasticsearch.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# bootstrap.ignore_system_bootstrap_checks: true
-
-discovery.zen.minimum_master_nodes: 1
-
-network.host:
-- _local_
-- _site_
-
-network.publish_host: _local_
-
-
-# Enable scripting as described here: https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html
-script.inline: true
-script.stored: true
-script.file: true
diff --git a/vendor/github.com/olivere/elastic/etc/ingest-geoip/.gitkeep b/vendor/github.com/olivere/elastic/etc/ingest-geoip/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/olivere/elastic/etc/ingest-geoip/.gitkeep
+++ /dev/null
diff --git a/vendor/github.com/olivere/elastic/etc/jvm.options b/vendor/github.com/olivere/elastic/etc/jvm.options
deleted file mode 100644
index d97fbc9ec..000000000
--- a/vendor/github.com/olivere/elastic/etc/jvm.options
+++ /dev/null
@@ -1,100 +0,0 @@
-## JVM configuration
-
-################################################################
-## IMPORTANT: JVM heap size
-################################################################
-##
-## You should always set the min and max JVM heap
-## size to the same value. For example, to set
-## the heap to 4 GB, set:
-##
-## -Xms4g
-## -Xmx4g
-##
-## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
-## for more information
-##
-################################################################
-
-# Xms represents the initial size of total heap space
-# Xmx represents the maximum size of total heap space
-
--Xms2g
--Xmx2g
-
-################################################################
-## Expert settings
-################################################################
-##
-## All settings below this section are considered
-## expert settings. Don't tamper with them unless
-## you understand what you are doing
-##
-################################################################
-
-## GC configuration
--XX:+UseConcMarkSweepGC
--XX:CMSInitiatingOccupancyFraction=75
--XX:+UseCMSInitiatingOccupancyOnly
-
-## optimizations
-
-# disable calls to System#gc
--XX:+DisableExplicitGC
-
-# pre-touch memory pages used by the JVM during initialization
--XX:+AlwaysPreTouch
-
-## basic
-
-# force the server VM
--server
-
-# set to headless, just in case
--Djava.awt.headless=true
-
-# ensure UTF-8 encoding by default (e.g. filenames)
--Dfile.encoding=UTF-8
-
-# use our provided JNA always versus the system one
--Djna.nosys=true
-
-# flags to keep Netty from being unsafe
--Dio.netty.noUnsafe=true
--Dio.netty.noKeySetOptimization=true
-
-# log4j 2
--Dlog4j.shutdownHookEnabled=false
--Dlog4j2.disable.jmx=true
--Dlog4j.skipJansi=true
-
-## heap dumps
-
-# generate a heap dump when an allocation from the Java heap fails
-# heap dumps are created in the working directory of the JVM
--XX:+HeapDumpOnOutOfMemoryError
-
-# specify an alternative path for heap dumps
-# ensure the directory exists and has sufficient space
-#-XX:HeapDumpPath=${heap.dump.path}
-
-## GC logging
-
-#-XX:+PrintGCDetails
-#-XX:+PrintGCTimeStamps
-#-XX:+PrintGCDateStamps
-#-XX:+PrintClassHistogram
-#-XX:+PrintTenuringDistribution
-#-XX:+PrintGCApplicationStoppedTime
-
-# log GC status to a file with time stamps
-# ensure the directory exists
-#-Xloggc:${loggc}
-
-# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
-# If documents were already indexed with unquoted fields in a previous version
-# of Elasticsearch, some operations may throw errors.
-#
-# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided
-# only for migration purposes.
-#-Delasticsearch.json.allow_unquoted_field_names=true
diff --git a/vendor/github.com/olivere/elastic/etc/log4j2.properties b/vendor/github.com/olivere/elastic/etc/log4j2.properties
deleted file mode 100644
index 9a3147f5a..000000000
--- a/vendor/github.com/olivere/elastic/etc/log4j2.properties
+++ /dev/null
@@ -1,74 +0,0 @@
-status = error
-
-# log action execution errors for easier debugging
-logger.action.name = org.elasticsearch.action
-logger.action.level = debug
-
-appender.console.type = Console
-appender.console.name = console
-appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
-
-appender.rolling.type = RollingFile
-appender.rolling.name = rolling
-appender.rolling.fileName = ${sys:es.logs}.log
-appender.rolling.layout.type = PatternLayout
-appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
-appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
-appender.rolling.policies.type = Policies
-appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
-appender.rolling.policies.time.interval = 1
-appender.rolling.policies.time.modulate = true
-
-rootLogger.level = info
-rootLogger.appenderRef.console.ref = console
-rootLogger.appenderRef.rolling.ref = rolling
-
-appender.deprecation_rolling.type = RollingFile
-appender.deprecation_rolling.name = deprecation_rolling
-appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
-appender.deprecation_rolling.layout.type = PatternLayout
-appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
-appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
-appender.deprecation_rolling.policies.type = Policies
-appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
-appender.deprecation_rolling.policies.size.size = 1GB
-appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
-appender.deprecation_rolling.strategy.max = 4
-
-logger.deprecation.name = org.elasticsearch.deprecation
-logger.deprecation.level = warn
-logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
-logger.deprecation.additivity = false
-
-appender.index_search_slowlog_rolling.type = RollingFile
-appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
-appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
-appender.index_search_slowlog_rolling.layout.type = PatternLayout
-appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
-appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
-appender.index_search_slowlog_rolling.policies.type = Policies
-appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
-appender.index_search_slowlog_rolling.policies.time.interval = 1
-appender.index_search_slowlog_rolling.policies.time.modulate = true
-
-logger.index_search_slowlog_rolling.name = index.search.slowlog
-logger.index_search_slowlog_rolling.level = trace
-logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
-logger.index_search_slowlog_rolling.additivity = false
-
-appender.index_indexing_slowlog_rolling.type = RollingFile
-appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
-appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
-appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
-appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
-appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
-appender.index_indexing_slowlog_rolling.policies.type = Policies
-appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
-appender.index_indexing_slowlog_rolling.policies.time.interval = 1
-appender.index_indexing_slowlog_rolling.policies.time.modulate = true
-
-logger.index_indexing_slowlog.name = index.indexing.slowlog.index
-logger.index_indexing_slowlog.level = trace
-logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
-logger.index_indexing_slowlog.additivity = false
diff --git a/vendor/github.com/olivere/elastic/etc/scripts/.gitkeep b/vendor/github.com/olivere/elastic/etc/scripts/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
--- a/vendor/github.com/olivere/elastic/etc/scripts/.gitkeep
+++ /dev/null
diff --git a/vendor/github.com/olivere/elastic/example_test.go b/vendor/github.com/olivere/elastic/example_test.go
deleted file mode 100644
index 62dc15d89..000000000
--- a/vendor/github.com/olivere/elastic/example_test.go
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic_test
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "log"
- "os"
- "reflect"
- "time"
-
- elastic "github.com/olivere/elastic"
-)
-
-type Tweet struct {
- User string `json:"user"`
- Message string `json:"message"`
- Retweets int `json:"retweets"`
- Image string `json:"image,omitempty"`
- Created time.Time `json:"created,omitempty"`
- Tags []string `json:"tags,omitempty"`
- Location string `json:"location,omitempty"`
- Suggest *elastic.SuggestField `json:"suggest_field,omitempty"`
-}
-
-func Example() {
- errorlog := log.New(os.Stdout, "APP ", log.LstdFlags)
-
- // Obtain a client. You can also provide your own HTTP client here.
- client, err := elastic.NewClient(elastic.SetErrorLog(errorlog))
- if err != nil {
- // Handle error
- panic(err)
- }
-
- // Trace request and response details like this
- //client.SetTracer(log.New(os.Stdout, "", 0))
-
- // Ping the Elasticsearch server to get e.g. the version number
- info, code, err := client.Ping("http://127.0.0.1:9200").Do(context.Background())
- if err != nil {
- // Handle error
- panic(err)
- }
- fmt.Printf("Elasticsearch returned with code %d and version %s\n", code, info.Version.Number)
-
- // Getting the ES version number is quite common, so there's a shortcut
- esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200")
- if err != nil {
- // Handle error
- panic(err)
- }
- fmt.Printf("Elasticsearch version %s\n", esversion)
-
- // Use the IndexExists service to check if a specified index exists.
- exists, err := client.IndexExists("twitter").Do(context.Background())
- if err != nil {
- // Handle error
- panic(err)
- }
- if !exists {
- // Create a new index.
- mapping := `
-{
- "settings":{
- "number_of_shards":1,
- "number_of_replicas":0
- },
- "mappings":{
- "doc":{
- "properties":{
- "user":{
- "type":"keyword"
- },
- "message":{
- "type":"text",
- "store": true,
- "fielddata": true
- },
- "retweets":{
- "type":"long"
- },
- "tags":{
- "type":"keyword"
- },
- "location":{
- "type":"geo_point"
- },
- "suggest_field":{
- "type":"completion"
- }
- }
- }
- }
-}
-`
- createIndex, err := client.CreateIndex("twitter").Body(mapping).Do(context.Background())
- if err != nil {
- // Handle error
- panic(err)
- }
- if !createIndex.Acknowledged {
- // Not acknowledged
- }
- }
-
- // Index a tweet (using JSON serialization)
- tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
- put1, err := client.Index().
- Index("twitter").
- Type("doc").
- Id("1").
- BodyJson(tweet1).
- Do(context.Background())
- if err != nil {
- // Handle error
- panic(err)
- }
- fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type)
-
- // Index a second tweet (by string)
- tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
- put2, err := client.Index().
- Index("twitter").
- Type("doc").
- Id("2").
- BodyString(tweet2).
- Do(context.Background())
- if err != nil {
- // Handle error
- panic(err)
- }
- fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type)
-
- // Get tweet with specified ID
- get1, err := client.Get().
- Index("twitter").
- Type("doc").
- Id("1").
- Do(context.Background())
- if err != nil {
- // Handle error
- panic(err)
- }
- if get1.Found {
- fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
- }
-
- // Flush to make sure the documents got written.
- _, err = client.Flush().Index("twitter").Do(context.Background())
- if err != nil {
- panic(err)
- }
-
- // Search with a term query
- termQuery := elastic.NewTermQuery("user", "olivere")
- searchResult, err := client.Search().
- Index("twitter"). // search in index "twitter"
- Query(termQuery). // specify the query
- Sort("user", true). // sort by "user" field, ascending
- From(0).Size(10). // take documents 0-9
- Pretty(true). // pretty print request and response JSON
- Do(context.Background()) // execute
- if err != nil {
- // Handle error
- panic(err)
- }
-
- // searchResult is of type SearchResult and returns hits, suggestions,
- // and all kinds of other information from Elasticsearch.
- fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
-
- // Each is a convenience function that iterates over hits in a search result.
- // It makes sure you don't need to check for nil values in the response.
- // However, it ignores errors in serialization. If you want full control
- // over iterating the hits, see below.
- var ttyp Tweet
- for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
- t := item.(Tweet)
- fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
- }
- // TotalHits is another convenience function that works even when something goes wrong.
- fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
-
- // Here's how you iterate through results with full control over each step.
- if searchResult.Hits.TotalHits > 0 {
- fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
-
- // Iterate through results
- for _, hit := range searchResult.Hits.Hits {
- // hit.Index contains the name of the index
-
- // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
- var t Tweet
- err := json.Unmarshal(*hit.Source, &t)
- if err != nil {
- // Deserialization failed
- }
-
- // Work with tweet
- fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
- }
- } else {
- // No hits
- fmt.Print("Found no tweets\n")
- }
-
- // Update a tweet by the update API of Elasticsearch.
- // We just increment the number of retweets.
- script := elastic.NewScript("ctx._source.retweets += params.num").Param("num", 1)
- update, err := client.Update().Index("twitter").Type("doc").Id("1").
- Script(script).
- Upsert(map[string]interface{}{"retweets": 0}).
- Do(context.Background())
- if err != nil {
- // Handle error
- panic(err)
- }
- fmt.Printf("New version of tweet %q is now %d", update.Id, update.Version)
-
- // ...
-
- // Delete an index.
- deleteIndex, err := client.DeleteIndex("twitter").Do(context.Background())
- if err != nil {
- // Handle error
- panic(err)
- }
- if !deleteIndex.Acknowledged {
- // Not acknowledged
- }
-}
-
-func ExampleClient_NewClient_default() {
- // Obtain a client to the Elasticsearch instance on http://127.0.0.1:9200.
- client, err := elastic.NewClient()
- if err != nil {
- // Handle error
- fmt.Printf("connection failed: %v\n", err)
- } else {
- fmt.Println("connected")
- }
- _ = client
- // Output:
- // connected
-}
-
-func ExampleClient_NewClient_cluster() {
- // Obtain a client for an Elasticsearch cluster of two nodes,
- // running on 10.0.1.1 and 10.0.1.2.
- client, err := elastic.NewClient(elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"))
- if err != nil {
- // Handle error
- panic(err)
- }
- _ = client
-}
-
-func ExampleClient_NewClient_manyOptions() {
- // Obtain a client for an Elasticsearch cluster of two nodes,
- // running on 10.0.1.1 and 10.0.1.2. Do not run the sniffer.
- // Set the healthcheck interval to 10s. When requests fail,
- // retry 5 times. Print error messages to os.Stderr and informational
- // messages to os.Stdout.
- client, err := elastic.NewClient(
- elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"),
- elastic.SetSniff(false),
- elastic.SetHealthcheckInterval(10*time.Second),
- elastic.SetMaxRetries(5),
- elastic.SetErrorLog(log.New(os.Stderr, "ELASTIC ", log.LstdFlags)),
- elastic.SetInfoLog(log.New(os.Stdout, "", log.LstdFlags)))
- if err != nil {
- // Handle error
- panic(err)
- }
- _ = client
-}
-
-func ExampleIndexExistsService() {
- // Get a client to the local Elasticsearch instance.
- client, err := elastic.NewClient()
- if err != nil {
- // Handle error
- panic(err)
- }
- // Use the IndexExists service to check if the index "twitter" exists.
- exists, err := client.IndexExists("twitter").Do(context.Background())
- if err != nil {
- // Handle error
- panic(err)
- }
- if exists {
- // ...
- }
-}
-
-func ExampleCreateIndexService() {
- // Get a client to the local Elasticsearch instance.
- client, err := elastic.NewClient()
- if err != nil {
- // Handle error
- panic(err)
- }
- // Create a new index.
- createIndex, err := client.CreateIndex("twitter").Do(context.Background())
- if err != nil {
- // Handle error
- panic(err)
- }
- if !createIndex.Acknowledged {
- // Not acknowledged
- }
-}
-
-func ExampleDeleteIndexService() {
- // Get a client to the local Elasticsearch instance.
- client, err := elastic.NewClient()
- if err != nil {
- // Handle error
- panic(err)
- }
- // Delete an index.
- deleteIndex, err := client.DeleteIndex("twitter").Do(context.Background())
- if err != nil {
- // Handle error
- panic(err)
- }
- if !deleteIndex.Acknowledged {
- // Not acknowledged
- }
-}
-
-func ExampleSearchService() {
- // Get a client to the local Elasticsearch instance.
- client, err := elastic.NewClient()
- if err != nil {
- // Handle error
- panic(err)
- }
-
- // Search with a term query
- termQuery := elastic.NewTermQuery("user", "olivere")
- searchResult, err := client.Search().
- Index("twitter"). // search in index "twitter"
- Query(termQuery). // specify the query
- Sort("user", true). // sort by "user" field, ascending
- From(0).Size(10). // take documents 0-9
- Pretty(true). // pretty print request and response JSON
- Do(context.Background()) // execute
- if err != nil {
- // Handle error
- panic(err)
- }
-
- // searchResult is of type SearchResult and returns hits, suggestions,
- // and all kinds of other information from Elasticsearch.
- fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
-
- // Number of hits
- if searchResult.Hits.TotalHits > 0 {
- fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
-
- // Iterate through results
- for _, hit := range searchResult.Hits.Hits {
- // hit.Index contains the name of the index
-
- // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
- var t Tweet
- err := json.Unmarshal(*hit.Source, &t)
- if err != nil {
- // Deserialization failed
- }
-
- // Work with tweet
- fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
- }
- } else {
- // No hits
- fmt.Print("Found no tweets\n")
- }
-}
-
-func ExampleAggregations() {
- // Get a client to the local Elasticsearch instance.
- client, err := elastic.NewClient()
- if err != nil {
- // Handle error
- panic(err)
- }
-
- // Create an aggregation for users and a sub-aggregation for a date histogram of tweets (per year).
- timeline := elastic.NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
- histogram := elastic.NewDateHistogramAggregation().Field("created").Interval("year")
- timeline = timeline.SubAggregation("history", histogram)
-
- // Search with a term query
- searchResult, err := client.Search().
- Index("twitter"). // search in index "twitter"
- Query(elastic.NewMatchAllQuery()). // return all results, but ...
- SearchType("count"). // ... do not return hits, just the count
- Aggregation("timeline", timeline). // add our aggregation to the query
- Pretty(true). // pretty print request and response JSON
- Do(context.Background()) // execute
- if err != nil {
- // Handle error
- panic(err)
- }
-
- // Access "timeline" aggregate in search result.
- agg, found := searchResult.Aggregations.Terms("timeline")
- if !found {
- log.Fatalf("we should have a terms aggregation called %q", "timeline")
- }
- for _, userBucket := range agg.Buckets {
- // Every bucket should have the user field as key.
- user := userBucket.Key
-
- // The sub-aggregation history should have the number of tweets per year.
- histogram, found := userBucket.DateHistogram("history")
- if found {
- for _, year := range histogram.Buckets {
- fmt.Printf("user %q has %d tweets in %q\n", user, year.DocCount, year.KeyAsString)
- }
- }
- }
-}
-
-func ExampleSearchResult() {
- client, err := elastic.NewClient()
- if err != nil {
- panic(err)
- }
-
- // Do a search
- searchResult, err := client.Search().Index("twitter").Query(elastic.NewMatchAllQuery()).Do(context.Background())
- if err != nil {
- panic(err)
- }
-
- // searchResult is of type SearchResult and returns hits, suggestions,
- // and all kinds of other information from Elasticsearch.
- fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
-
- // Each is a utility function that iterates over hits in a search result.
- // It makes sure you don't need to check for nil values in the response.
- // However, it ignores errors in serialization. If you want full control
- // over iterating the hits, see below.
- var ttyp Tweet
- for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
- t := item.(Tweet)
- fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
- }
- fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
-
- // Here's how you iterate hits with full control.
- if searchResult.Hits.TotalHits > 0 {
- fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
-
- // Iterate through results
- for _, hit := range searchResult.Hits.Hits {
- // hit.Index contains the name of the index
-
- // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
- var t Tweet
- err := json.Unmarshal(*hit.Source, &t)
- if err != nil {
- // Deserialization failed
- }
-
- // Work with tweet
- fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
- }
- } else {
- // No hits
- fmt.Print("Found no tweets\n")
- }
-}
-
-func ExampleClusterHealthService() {
- client, err := elastic.NewClient()
- if err != nil {
- panic(err)
- }
-
- // Get cluster health
- res, err := client.ClusterHealth().Index("twitter").Do(context.Background())
- if err != nil {
- panic(err)
- }
- if res == nil {
- panic(err)
- }
- fmt.Printf("Cluster status is %q\n", res.Status)
-}
-
-func ExampleClusterHealthService_WaitForGreen() {
- client, err := elastic.NewClient()
- if err != nil {
- panic(err)
- }
-
- // Wait for status green
- res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do(context.Background())
- if err != nil {
- panic(err)
- }
- if res.TimedOut {
- fmt.Printf("time out waiting for cluster status %q\n", "green")
- } else {
- fmt.Printf("cluster status is %q\n", res.Status)
- }
-}
-
-func ExampleClusterStateService() {
- client, err := elastic.NewClient()
- if err != nil {
- panic(err)
- }
-
- // Get cluster state
- res, err := client.ClusterState().Metric("version").Do(context.Background())
- if err != nil {
- panic(err)
- }
- fmt.Printf("Cluster %q has version %d", res.ClusterName, res.Version)
-}
diff --git a/vendor/github.com/olivere/elastic/exists.go b/vendor/github.com/olivere/elastic/exists.go
deleted file mode 100644
index ae5a88fa7..000000000
--- a/vendor/github.com/olivere/elastic/exists.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// ExistsService checks for the existence of a document using HEAD.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-get.html
-// for details.
-type ExistsService struct {
- client *Client
- pretty bool
- id string
- index string
- typ string
- preference string
- realtime *bool
- refresh string
- routing string
- parent string
-}
-
-// NewExistsService creates a new ExistsService.
-func NewExistsService(client *Client) *ExistsService {
- return &ExistsService{
- client: client,
- }
-}
-
-// Id is the document ID.
-func (s *ExistsService) Id(id string) *ExistsService {
- s.id = id
- return s
-}
-
-// Index is the name of the index.
-func (s *ExistsService) Index(index string) *ExistsService {
- s.index = index
- return s
-}
-
-// Type is the type of the document (use `_all` to fetch the first document
-// matching the ID across all types).
-func (s *ExistsService) Type(typ string) *ExistsService {
- s.typ = typ
- return s
-}
-
-// Preference specifies the node or shard the operation should be performed on (default: random).
-func (s *ExistsService) Preference(preference string) *ExistsService {
- s.preference = preference
- return s
-}
-
-// Realtime specifies whether to perform the operation in realtime or search mode.
-func (s *ExistsService) Realtime(realtime bool) *ExistsService {
- s.realtime = &realtime
- return s
-}
-
-// Refresh the shard containing the document before performing the operation.
-func (s *ExistsService) Refresh(refresh string) *ExistsService {
- s.refresh = refresh
- return s
-}
-
-// Routing is a specific routing value.
-func (s *ExistsService) Routing(routing string) *ExistsService {
- s.routing = routing
- return s
-}
-
-// Parent is the ID of the parent document.
-func (s *ExistsService) Parent(parent string) *ExistsService {
- s.parent = parent
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *ExistsService) Pretty(pretty bool) *ExistsService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *ExistsService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
- "id": s.id,
- "index": s.index,
- "type": s.typ,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.realtime != nil {
- params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
- }
- if s.refresh != "" {
- params.Set("refresh", s.refresh)
- }
- if s.routing != "" {
- params.Set("routing", s.routing)
- }
- if s.parent != "" {
- params.Set("parent", s.parent)
- }
- if s.preference != "" {
- params.Set("preference", s.preference)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *ExistsService) Validate() error {
- var invalid []string
- if s.id == "" {
- invalid = append(invalid, "Id")
- }
- if s.index == "" {
- invalid = append(invalid, "Index")
- }
- if s.typ == "" {
- invalid = append(invalid, "Type")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *ExistsService) Do(ctx context.Context) (bool, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return false, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return false, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "HEAD",
- Path: path,
- Params: params,
- IgnoreErrors: []int{404},
- })
- if err != nil {
- return false, err
- }
-
- // Return operation response
- switch res.StatusCode {
- case http.StatusOK:
- return true, nil
- case http.StatusNotFound:
- return false, nil
- default:
- return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
- }
-}
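
For reference, a minimal sketch of how the ExistsService above is typically driven through the client facade (client.Exists(), as exercised in exists_test.go below). The index name, type and document ID are placeholders, and a cluster reachable at the default URL is assumed:

    package main

    import (
        "context"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        // NewClient assumes an Elasticsearch node at the default URL.
        client, err := elastic.NewClient()
        if err != nil {
            panic(err)
        }
        // Issues HEAD /twitter/doc/1 and maps 200 to true, 404 to false.
        exists, err := client.Exists().Index("twitter").Type("doc").Id("1").Do(context.Background())
        if err != nil {
            panic(err)
        }
        fmt.Printf("document exists: %v\n", exists)
    }
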
diff --git a/vendor/github.com/olivere/elastic/exists_test.go b/vendor/github.com/olivere/elastic/exists_test.go
deleted file mode 100644
index 9b834223d..000000000
--- a/vendor/github.com/olivere/elastic/exists_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestExists(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
-
- exists, err := client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !exists {
- t.Fatal("expected document to exist")
- }
-}
-
-func TestExistsValidate(t *testing.T) {
- client := setupTestClient(t)
-
- // No index -> fail with error
- res, err := NewExistsService(client).Type("doc").Id("1").Do(context.TODO())
- if err == nil {
-		t.Fatalf("expected Exists to fail without index name")
- }
- if res != false {
- t.Fatalf("expected result to be false; got: %v", res)
- }
-
- // No type -> fail with error
- res, err = NewExistsService(client).Index(testIndexName).Id("1").Do(context.TODO())
- if err == nil {
-		t.Fatalf("expected Exists to fail without type")
- }
- if res != false {
- t.Fatalf("expected result to be false; got: %v", res)
- }
-
- // No id -> fail with error
- res, err = NewExistsService(client).Index(testIndexName).Type("doc").Do(context.TODO())
- if err == nil {
-		t.Fatalf("expected Exists to fail without id")
- }
- if res != false {
- t.Fatalf("expected result to be false; got: %v", res)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/explain.go b/vendor/github.com/olivere/elastic/explain.go
deleted file mode 100644
index 2b975ad5d..000000000
--- a/vendor/github.com/olivere/elastic/explain.go
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// ExplainService computes a score explanation for a query and
-// a specific document.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-explain.html.
-type ExplainService struct {
- client *Client
- pretty bool
- id string
- index string
- typ string
- q string
- routing string
- lenient *bool
- analyzer string
- df string
- fields []string
- lowercaseExpandedTerms *bool
- xSourceInclude []string
- analyzeWildcard *bool
- parent string
- preference string
- xSource []string
- defaultOperator string
- xSourceExclude []string
- source string
- bodyJson interface{}
- bodyString string
-}
-
-// NewExplainService creates a new ExplainService.
-func NewExplainService(client *Client) *ExplainService {
- return &ExplainService{
- client: client,
- xSource: make([]string, 0),
- xSourceExclude: make([]string, 0),
- fields: make([]string, 0),
- xSourceInclude: make([]string, 0),
- }
-}
-
-// Id is the document ID.
-func (s *ExplainService) Id(id string) *ExplainService {
- s.id = id
- return s
-}
-
-// Index is the name of the index.
-func (s *ExplainService) Index(index string) *ExplainService {
- s.index = index
- return s
-}
-
-// Type is the type of the document.
-func (s *ExplainService) Type(typ string) *ExplainService {
- s.typ = typ
- return s
-}
-
-// Source is the URL-encoded query definition (instead of using the request body).
-func (s *ExplainService) Source(source string) *ExplainService {
- s.source = source
- return s
-}
-
-// XSourceExclude is a list of fields to exclude from the returned _source field.
-func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService {
- s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
- return s
-}
-
-// Lenient specifies whether format-based query failures
-// (such as providing text to a numeric field) should be ignored.
-func (s *ExplainService) Lenient(lenient bool) *ExplainService {
- s.lenient = &lenient
- return s
-}
-
-// Query in the Lucene query string syntax.
-func (s *ExplainService) Q(q string) *ExplainService {
- s.q = q
- return s
-}
-
-// Routing sets a specific routing value.
-func (s *ExplainService) Routing(routing string) *ExplainService {
- s.routing = routing
- return s
-}
-
-// AnalyzeWildcard specifies whether wildcards and prefix queries
-// in the query string query should be analyzed (default: false).
-func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService {
- s.analyzeWildcard = &analyzeWildcard
- return s
-}
-
-// Analyzer is the analyzer for the query string query.
-func (s *ExplainService) Analyzer(analyzer string) *ExplainService {
- s.analyzer = analyzer
- return s
-}
-
-// Df is the default field for query string query (default: _all).
-func (s *ExplainService) Df(df string) *ExplainService {
- s.df = df
- return s
-}
-
-// Fields is a list of fields to return in the response.
-func (s *ExplainService) Fields(fields ...string) *ExplainService {
- s.fields = append(s.fields, fields...)
- return s
-}
-
-// LowercaseExpandedTerms specifies whether query terms should be lowercased.
-func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService {
- s.lowercaseExpandedTerms = &lowercaseExpandedTerms
- return s
-}
-
-// XSourceInclude is a list of fields to extract and return from the _source field.
-func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService {
- s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
- return s
-}
-
-// DefaultOperator is the default operator for query string query (AND or OR).
-func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService {
- s.defaultOperator = defaultOperator
- return s
-}
-
-// Parent is the ID of the parent document.
-func (s *ExplainService) Parent(parent string) *ExplainService {
- s.parent = parent
- return s
-}
-
-// Preference specifies the node or shard the operation should be performed on (default: random).
-func (s *ExplainService) Preference(preference string) *ExplainService {
- s.preference = preference
- return s
-}
-
-// XSource is true or false to return the _source field or not, or a list of fields to return.
-func (s *ExplainService) XSource(xSource ...string) *ExplainService {
- s.xSource = append(s.xSource, xSource...)
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *ExplainService) Pretty(pretty bool) *ExplainService {
- s.pretty = pretty
- return s
-}
-
-// Query sets a query definition using the Query DSL.
-func (s *ExplainService) Query(query Query) *ExplainService {
- src, err := query.Source()
- if err != nil {
- // Do nothing in case of an error
- return s
- }
- body := make(map[string]interface{})
- body["query"] = src
- s.bodyJson = body
- return s
-}
-
-// BodyJson sets the query definition using the Query DSL.
-func (s *ExplainService) BodyJson(body interface{}) *ExplainService {
- s.bodyJson = body
- return s
-}
-
-// BodyString sets the query definition using the Query DSL as a string.
-func (s *ExplainService) BodyString(body string) *ExplainService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *ExplainService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{
- "id": s.id,
- "index": s.index,
- "type": s.typ,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if len(s.xSource) > 0 {
- params.Set("_source", strings.Join(s.xSource, ","))
- }
- if s.defaultOperator != "" {
- params.Set("default_operator", s.defaultOperator)
- }
- if s.parent != "" {
- params.Set("parent", s.parent)
- }
- if s.preference != "" {
- params.Set("preference", s.preference)
- }
- if s.source != "" {
- params.Set("source", s.source)
- }
- if len(s.xSourceExclude) > 0 {
- params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
- }
- if s.lenient != nil {
- params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
- }
- if s.q != "" {
- params.Set("q", s.q)
- }
- if s.routing != "" {
- params.Set("routing", s.routing)
- }
- if len(s.fields) > 0 {
- params.Set("fields", strings.Join(s.fields, ","))
- }
- if s.lowercaseExpandedTerms != nil {
- params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
- }
- if len(s.xSourceInclude) > 0 {
- params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
- }
- if s.analyzeWildcard != nil {
- params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
- }
- if s.analyzer != "" {
- params.Set("analyzer", s.analyzer)
- }
- if s.df != "" {
- params.Set("df", s.df)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *ExplainService) Validate() error {
- var invalid []string
- if s.index == "" {
- invalid = append(invalid, "Index")
- }
- if s.typ == "" {
- invalid = append(invalid, "Type")
- }
- if s.id == "" {
- invalid = append(invalid, "Id")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *ExplainService) Do(ctx context.Context) (*ExplainResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(ExplainResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// ExplainResponse is the response of ExplainService.Do.
-type ExplainResponse struct {
- Index string `json:"_index"`
- Type string `json:"_type"`
- Id string `json:"_id"`
- Matched bool `json:"matched"`
- Explanation map[string]interface{} `json:"explanation"`
-}
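
A similarly hedged sketch of the ExplainService in use, mirroring explain_test.go below; the index, type, document ID and term query are placeholder values:

    package main

    import (
        "context"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        client, err := elastic.NewClient()
        if err != nil {
            panic(err)
        }
        // Ask Elasticsearch why (or why not) document "1" matches the query.
        expl, err := client.Explain("twitter", "doc", "1").
            Query(elastic.NewTermQuery("user", "olivere")).
            Do(context.Background())
        if err != nil {
            panic(err)
        }
        fmt.Printf("matched=%v explanation=%v\n", expl.Matched, expl.Explanation)
    }
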
diff --git a/vendor/github.com/olivere/elastic/explain_test.go b/vendor/github.com/olivere/elastic/explain_test.go
deleted file mode 100644
index 22cb9668a..000000000
--- a/vendor/github.com/olivere/elastic/explain_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestExplain(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
-
- // Add a document
- indexResult, err := client.Index().
- Index(testIndexName).
- Type("doc").
- Id("1").
- BodyJson(&tweet1).
- Refresh("true").
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if indexResult == nil {
- t.Errorf("expected result to be != nil; got: %v", indexResult)
- }
-
- // Explain
- query := NewTermQuery("user", "olivere")
- expl, err := client.Explain(testIndexName, "doc", "1").Query(query).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if expl == nil {
- t.Fatal("expected to return an explanation")
- }
- if !expl.Matched {
- t.Errorf("expected matched to be %v; got: %v", true, expl.Matched)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/fetch_source_context.go b/vendor/github.com/olivere/elastic/fetch_source_context.go
deleted file mode 100644
index 874c4c1da..000000000
--- a/vendor/github.com/olivere/elastic/fetch_source_context.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "net/url"
- "strings"
-)
-
-// FetchSourceContext enables source filtering, i.e. it allows control
-// over how the _source field is returned with every hit. It is used
-// with various endpoints, e.g. when searching for documents, retrieving
-// individual documents, or even updating documents.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-source-filtering.html
-// for details.
-type FetchSourceContext struct {
- fetchSource bool
- includes []string
- excludes []string
-}
-
-// NewFetchSourceContext returns a new FetchSourceContext.
-func NewFetchSourceContext(fetchSource bool) *FetchSourceContext {
- return &FetchSourceContext{
- fetchSource: fetchSource,
- includes: make([]string, 0),
- excludes: make([]string, 0),
- }
-}
-
-// FetchSource indicates whether to return the _source.
-func (fsc *FetchSourceContext) FetchSource() bool {
- return fsc.fetchSource
-}
-
-// SetFetchSource specifies whether to return the _source.
-func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) {
- fsc.fetchSource = fetchSource
-}
-
-// Include indicates to return specific parts of the _source.
-// Wildcards are allowed here.
-func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext {
- fsc.includes = append(fsc.includes, includes...)
- return fsc
-}
-
-// Exclude indicates to exclude specific parts of the _source.
-// Wildcards are allowed here.
-func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext {
- fsc.excludes = append(fsc.excludes, excludes...)
- return fsc
-}
-
-// Source returns the JSON-serializable data to be used in a body.
-func (fsc *FetchSourceContext) Source() (interface{}, error) {
- if !fsc.fetchSource {
- return false, nil
- }
- if len(fsc.includes) == 0 && len(fsc.excludes) == 0 {
- return true, nil
- }
- src := make(map[string]interface{})
- if len(fsc.includes) > 0 {
- src["includes"] = fsc.includes
- }
- if len(fsc.excludes) > 0 {
- src["excludes"] = fsc.excludes
- }
- return src, nil
-}
-
-// Query returns the parameters in a form suitable for a URL query string.
-func (fsc *FetchSourceContext) Query() url.Values {
- params := url.Values{}
- if fsc.fetchSource {
- if len(fsc.includes) > 0 {
- params.Add("_source_include", strings.Join(fsc.includes, ","))
- }
- if len(fsc.excludes) > 0 {
- params.Add("_source_exclude", strings.Join(fsc.excludes, ","))
- }
- } else {
- params.Add("_source", "false")
- }
- return params
-}
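
To illustrate how FetchSourceContext plugs into other services, a hedged sketch of source filtering on a single-document get (the same pattern appears in get_test.go later in this diff); the index, type, ID and field names are placeholders:

    package main

    import (
        "context"
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        client, err := elastic.NewClient()
        if err != nil {
            panic(err)
        }
        // Return the _source but keep only "user"; drop "message".
        fsc := elastic.NewFetchSourceContext(true).Include("user").Exclude("message")
        res, err := client.Get().Index("twitter").Type("doc").Id("1").
            FetchSourceContext(fsc).
            Do(context.Background())
        if err != nil {
            panic(err)
        }
        var doc map[string]interface{}
        if err := json.Unmarshal(*res.Source, &doc); err != nil {
            panic(err)
        }
        fmt.Println(doc) // only the included fields remain
    }
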
diff --git a/vendor/github.com/olivere/elastic/fetch_source_context_test.go b/vendor/github.com/olivere/elastic/fetch_source_context_test.go
deleted file mode 100644
index b98549036..000000000
--- a/vendor/github.com/olivere/elastic/fetch_source_context_test.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestFetchSourceContextNoFetchSource(t *testing.T) {
- builder := NewFetchSourceContext(false)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `false`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFetchSourceContextNoFetchSourceIgnoreIncludesAndExcludes(t *testing.T) {
- builder := NewFetchSourceContext(false).Include("a", "b").Exclude("c")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `false`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFetchSourceContextFetchSource(t *testing.T) {
- builder := NewFetchSourceContext(true)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `true`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFetchSourceContextFetchSourceWithIncludesOnly(t *testing.T) {
- builder := NewFetchSourceContext(true).Include("a", "b")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"includes":["a","b"]}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFetchSourceContextFetchSourceWithIncludesAndExcludes(t *testing.T) {
- builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"excludes":["c"],"includes":["a","b"]}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFetchSourceContextQueryDefaults(t *testing.T) {
- builder := NewFetchSourceContext(true)
- values := builder.Query()
- got := values.Encode()
- expected := ""
- if got != expected {
- t.Errorf("expected %q; got: %q", expected, got)
- }
-}
-
-func TestFetchSourceContextQueryNoFetchSource(t *testing.T) {
- builder := NewFetchSourceContext(false)
- values := builder.Query()
- got := values.Encode()
- expected := "_source=false"
- if got != expected {
- t.Errorf("expected %q; got: %q", expected, got)
- }
-}
-
-func TestFetchSourceContextQueryFetchSourceWithIncludesAndExcludes(t *testing.T) {
- builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
- values := builder.Query()
- got := values.Encode()
- expected := "_source_exclude=c&_source_include=a%2Cb"
- if got != expected {
- t.Errorf("expected %q; got: %q", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/field_caps.go b/vendor/github.com/olivere/elastic/field_caps.go
deleted file mode 100644
index 393cd3ce8..000000000
--- a/vendor/github.com/olivere/elastic/field_caps.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// FieldCapsService allows retrieving the capabilities of fields among multiple indices.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-field-caps.html
-// for details
-type FieldCapsService struct {
- client *Client
- pretty bool
- index []string
- allowNoIndices *bool
- expandWildcards string
- fields []string
- ignoreUnavailable *bool
- bodyJson interface{}
- bodyString string
-}
-
-// NewFieldCapsService creates a new FieldCapsService
-func NewFieldCapsService(client *Client) *FieldCapsService {
- return &FieldCapsService{
- client: client,
- }
-}
-
-// Index is a list of index names; use `_all` or empty string to perform
-// the operation on all indices.
-func (s *FieldCapsService) Index(index ...string) *FieldCapsService {
- s.index = append(s.index, index...)
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices expression
-// resolves into no concrete indices.
-// (This includes `_all` string or when no indices have been specified).
-func (s *FieldCapsService) AllowNoIndices(allowNoIndices bool) *FieldCapsService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *FieldCapsService) ExpandWildcards(expandWildcards string) *FieldCapsService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// Fields is a list of fields for which to get field capabilities.
-func (s *FieldCapsService) Fields(fields ...string) *FieldCapsService {
- s.fields = append(s.fields, fields...)
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be ignored when unavailable (missing or closed).
-func (s *FieldCapsService) IgnoreUnavailable(ignoreUnavailable bool) *FieldCapsService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *FieldCapsService) Pretty(pretty bool) *FieldCapsService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson sets the request body as JSON: field objects containing the field name and, optionally, a range used to filter out indices whose results fall outside the defined bounds.
-func (s *FieldCapsService) BodyJson(body interface{}) *FieldCapsService {
- s.bodyJson = body
- return s
-}
-
-// BodyString sets the request body as a string: field objects containing the field name and, optionally, a range used to filter out indices whose results fall outside the defined bounds.
-func (s *FieldCapsService) BodyString(body string) *FieldCapsService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *FieldCapsService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
- if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_field_caps", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- } else {
- path = "/_field_caps"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if len(s.fields) > 0 {
- params.Set("fields", strings.Join(s.fields, ","))
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *FieldCapsService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *FieldCapsService) Do(ctx context.Context) (*FieldCapsResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- IgnoreErrors: []int{http.StatusNotFound},
- })
- if err != nil {
- return nil, err
- }
-
- // TODO(oe): Is 404 really a valid response here?
- if res.StatusCode == http.StatusNotFound {
- return &FieldCapsResponse{}, nil
- }
-
- // Return operation response
- ret := new(FieldCapsResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// -- Request --
-
-// FieldCapsRequest can be used to set up the body to be used in the
-// Field Capabilities API.
-type FieldCapsRequest struct {
- Fields []string `json:"fields"`
-}
-
-// -- Response --
-
-// FieldCapsResponse contains field capabilities.
-type FieldCapsResponse struct {
- Fields map[string]FieldCaps `json:"fields,omitempty"`
-}
-
-// FieldCaps contains capabilities of an individual field.
-type FieldCaps struct {
- Type string `json:"type"`
- Searchable bool `json:"searchable"`
- Aggregatable bool `json:"aggregatable"`
- Indices []string `json:"indices,omitempty"`
- NonSearchableIndices []string `json:"non_searchable_indices,omitempty"`
- NonAggregatableIndices []string `json:"non_aggregatable_indices,omitempty"`
-}
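
A minimal, hedged sketch of the Field Capabilities call via the client facade (client.FieldCaps(), as used in field_caps_test.go below); the field names are placeholders:

    package main

    import (
        "context"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        client, err := elastic.NewClient()
        if err != nil {
            panic(err)
        }
        res, err := client.FieldCaps("_all").
            Fields("user", "message").
            Do(context.Background())
        if err != nil {
            panic(err)
        }
        for name, caps := range res.Fields {
            fmt.Printf("%s: type=%s searchable=%v aggregatable=%v\n",
                name, caps.Type, caps.Searchable, caps.Aggregatable)
        }
    }
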
diff --git a/vendor/github.com/olivere/elastic/field_caps_test.go b/vendor/github.com/olivere/elastic/field_caps_test.go
deleted file mode 100644
index e299fd516..000000000
--- a/vendor/github.com/olivere/elastic/field_caps_test.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "net/url"
- "reflect"
- "sort"
- "testing"
-)
-
-func TestFieldCapsURLs(t *testing.T) {
- tests := []struct {
- Service *FieldCapsService
- ExpectedPath string
- ExpectedParams url.Values
- }{
- {
- Service: &FieldCapsService{},
- ExpectedPath: "/_field_caps",
- ExpectedParams: url.Values{},
- },
- {
- Service: &FieldCapsService{
- index: []string{"index1", "index2"},
- },
- ExpectedPath: "/index1%2Cindex2/_field_caps",
- ExpectedParams: url.Values{},
- },
- {
- Service: &FieldCapsService{
- index: []string{"index_*"},
- pretty: true,
- },
- ExpectedPath: "/index_%2A/_field_caps",
- ExpectedParams: url.Values{"pretty": []string{"true"}},
- },
- }
-
- for _, test := range tests {
- gotPath, gotParams, err := test.Service.buildURL()
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if gotPath != test.ExpectedPath {
- t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
- }
- if gotParams.Encode() != test.ExpectedParams.Encode() {
- t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
- }
- }
-}
-
-func TestFieldCapsRequestSerialize(t *testing.T) {
- req := &FieldCapsRequest{
- Fields: []string{"creation_date", "answer_count"},
- }
- data, err := json.Marshal(req)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"fields":["creation_date","answer_count"]}`
- if got != expected {
- t.Fatalf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFieldCapsRequestDeserialize(t *testing.T) {
- body := `{
- "fields" : ["creation_date", "answer_count"]
- }`
-
- var request FieldCapsRequest
- if err := json.Unmarshal([]byte(body), &request); err != nil {
- t.Fatalf("unexpected error during unmarshalling: %v", err)
- }
-
- sort.Sort(lexicographically{request.Fields})
-
- expectedFields := []string{"answer_count", "creation_date"}
- if !reflect.DeepEqual(request.Fields, expectedFields) {
- t.Fatalf("expected fields to be %v, got %v", expectedFields, request.Fields)
- }
-}
-
-func TestFieldCapsResponseUnmarshalling(t *testing.T) {
-	body := `{
- "_shards": {
- "total": 1,
- "successful": 1,
- "failed": 0
- },
- "fields": {
- "creation_date": {
- "type": "date",
- "searchable": true,
- "aggregatable": true,
- "indices": ["index1", "index2"],
- "non_searchable_indices": null,
- "non_aggregatable_indices": null
- },
- "answer": {
- "type": "keyword",
- "searchable": true,
- "aggregatable": true
- }
- }
- }`
-
- var resp FieldCapsResponse
-	if err := json.Unmarshal([]byte(body), &resp); err != nil {
- t.Errorf("unexpected error during unmarshalling: %v", err)
- }
-
- caps, ok := resp.Fields["creation_date"]
- if !ok {
- t.Errorf("expected creation_date to be in the fields map, didn't find it")
- }
- if want, have := true, caps.Searchable; want != have {
- t.Errorf("expected creation_date searchable to be %v, got %v", want, have)
- }
- if want, have := true, caps.Aggregatable; want != have {
- t.Errorf("expected creation_date aggregatable to be %v, got %v", want, have)
- }
- if want, have := []string{"index1", "index2"}, caps.Indices; !reflect.DeepEqual(want, have) {
- t.Errorf("expected creation_date indices to be %v, got %v", want, have)
- }
-}
-
-func TestFieldCaps123(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t)
- // client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", 0)))
-
- res, err := client.FieldCaps("_all").Fields("user", "message", "retweets", "created").Pretty(true).Do(context.TODO())
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if res == nil {
- t.Fatalf("expected response; got: %v", res)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/geo_point.go b/vendor/github.com/olivere/elastic/geo_point.go
deleted file mode 100644
index fb243671d..000000000
--- a/vendor/github.com/olivere/elastic/geo_point.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// GeoPoint is a geographic position described via latitude and longitude.
-type GeoPoint struct {
- Lat float64 `json:"lat"`
- Lon float64 `json:"lon"`
-}
-
-// Source returns the object to be serialized in Elasticsearch DSL.
-func (pt *GeoPoint) Source() map[string]float64 {
- return map[string]float64{
- "lat": pt.Lat,
- "lon": pt.Lon,
- }
-}
-
-// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude.
-func GeoPointFromLatLon(lat, lon float64) *GeoPoint {
- return &GeoPoint{Lat: lat, Lon: lon}
-}
-
-// GeoPointFromString initializes a new GeoPoint by a string that is
-// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091".
-func GeoPointFromString(latLon string) (*GeoPoint, error) {
- latlon := strings.SplitN(latLon, ",", 2)
- if len(latlon) != 2 {
- return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
- }
- lat, err := strconv.ParseFloat(latlon[0], 64)
- if err != nil {
- return nil, err
- }
- lon, err := strconv.ParseFloat(latlon[1], 64)
- if err != nil {
- return nil, err
- }
- return &GeoPoint{Lat: lat, Lon: lon}, nil
-}
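
GeoPoint needs no cluster at all; a small sketch of parsing a "{lat},{lon}" string and serializing it back for the query DSL:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        pt, err := elastic.GeoPointFromString("40.10210,-70.12091")
        if err != nil {
            panic(err)
        }
        data, err := json.Marshal(pt.Source())
        if err != nil {
            panic(err)
        }
        fmt.Println(string(data)) // {"lat":40.1021,"lon":-70.12091}
    }
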
diff --git a/vendor/github.com/olivere/elastic/geo_point_test.go b/vendor/github.com/olivere/elastic/geo_point_test.go
deleted file mode 100644
index 1d085cd38..000000000
--- a/vendor/github.com/olivere/elastic/geo_point_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestGeoPointSource(t *testing.T) {
- pt := GeoPoint{Lat: 40, Lon: -70}
-
- data, err := json.Marshal(pt.Source())
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"lat":40,"lon":-70}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/get.go b/vendor/github.com/olivere/elastic/get.go
deleted file mode 100644
index efcc748bb..000000000
--- a/vendor/github.com/olivere/elastic/get.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// GetService allows to get a typed JSON document from the index based
-// on its id.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-get.html
-// for details.
-type GetService struct {
- client *Client
- pretty bool
- index string
- typ string
- id string
- routing string
- preference string
- storedFields []string
- refresh string
- realtime *bool
- fsc *FetchSourceContext
- version interface{}
- versionType string
- parent string
- ignoreErrorsOnGeneratedFields *bool
-}
-
-// NewGetService creates a new GetService.
-func NewGetService(client *Client) *GetService {
- return &GetService{
- client: client,
- typ: "_all",
- }
-}
-
-// Index is the name of the index.
-func (s *GetService) Index(index string) *GetService {
- s.index = index
- return s
-}
-
-// Type is the type of the document (use `_all` to fetch the first document
-// matching the ID across all types).
-func (s *GetService) Type(typ string) *GetService {
- s.typ = typ
- return s
-}
-
-// Id is the document ID.
-func (s *GetService) Id(id string) *GetService {
- s.id = id
- return s
-}
-
-// Parent is the ID of the parent document.
-func (s *GetService) Parent(parent string) *GetService {
- s.parent = parent
- return s
-}
-
-// Routing is the specific routing value.
-func (s *GetService) Routing(routing string) *GetService {
- s.routing = routing
- return s
-}
-
-// Preference specifies the node or shard the operation should be performed on (default: random).
-func (s *GetService) Preference(preference string) *GetService {
- s.preference = preference
- return s
-}
-
-// StoredFields is a list of fields to return in the response.
-func (s *GetService) StoredFields(storedFields ...string) *GetService {
- s.storedFields = append(s.storedFields, storedFields...)
- return s
-}
-
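-// FetchSource indicates whether the _source of the document should be returned.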
-func (s *GetService) FetchSource(fetchSource bool) *GetService {
- if s.fsc == nil {
- s.fsc = NewFetchSourceContext(fetchSource)
- } else {
- s.fsc.SetFetchSource(fetchSource)
- }
- return s
-}
-
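-// FetchSourceContext sets the source filtering context for the returned document.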
-func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService {
- s.fsc = fetchSourceContext
- return s
-}
-
-// Refresh the shard containing the document before performing the operation.
-func (s *GetService) Refresh(refresh string) *GetService {
- s.refresh = refresh
- return s
-}
-
-// Realtime specifies whether to perform the operation in realtime or search mode.
-func (s *GetService) Realtime(realtime bool) *GetService {
- s.realtime = &realtime
- return s
-}
-
-// VersionType is the specific version type.
-func (s *GetService) VersionType(versionType string) *GetService {
- s.versionType = versionType
- return s
-}
-
-// Version is an explicit version number for concurrency control.
-func (s *GetService) Version(version interface{}) *GetService {
- s.version = version
- return s
-}
-
-// IgnoreErrorsOnGeneratedFields indicates whether to ignore fields that
-// are generated if the transaction log is accessed.
-func (s *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService {
- s.ignoreErrorsOnGeneratedFields = &ignore
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *GetService) Pretty(pretty bool) *GetService {
- s.pretty = pretty
- return s
-}
-
-// Validate checks if the operation is valid.
-func (s *GetService) Validate() error {
- var invalid []string
- if s.id == "" {
- invalid = append(invalid, "Id")
- }
- if s.index == "" {
- invalid = append(invalid, "Index")
- }
- if s.typ == "" {
- invalid = append(invalid, "Type")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// buildURL builds the URL for the operation.
-func (s *GetService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
- "id": s.id,
- "index": s.index,
- "type": s.typ,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.routing != "" {
- params.Set("routing", s.routing)
- }
- if s.parent != "" {
- params.Set("parent", s.parent)
- }
- if s.preference != "" {
- params.Set("preference", s.preference)
- }
- if len(s.storedFields) > 0 {
- params.Set("stored_fields", strings.Join(s.storedFields, ","))
- }
- if s.refresh != "" {
- params.Set("refresh", s.refresh)
- }
- if s.version != nil {
- params.Set("version", fmt.Sprintf("%v", s.version))
- }
- if s.versionType != "" {
- params.Set("version_type", s.versionType)
- }
- if s.realtime != nil {
- params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
- }
- if s.ignoreErrorsOnGeneratedFields != nil {
- params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *s.ignoreErrorsOnGeneratedFields))
- }
- if s.fsc != nil {
- for k, values := range s.fsc.Query() {
- params.Add(k, strings.Join(values, ","))
- }
- }
- return path, params, nil
-}
-
-// Do executes the operation.
-func (s *GetService) Do(ctx context.Context) (*GetResult, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(GetResult)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// -- Result of a get request.
-
-// GetResult is the outcome of GetService.Do.
-type GetResult struct {
- Index string `json:"_index"` // index meta field
- Type string `json:"_type"` // type meta field
- Id string `json:"_id"` // id meta field
- Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields)
- Routing string `json:"_routing"` // routing meta field
- Parent string `json:"_parent"` // parent meta field
- Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService
- Source *json.RawMessage `json:"_source,omitempty"`
- Found bool `json:"found,omitempty"`
- Fields map[string]interface{} `json:"fields,omitempty"`
- //Error string `json:"error,omitempty"` // used only in MultiGet
- // TODO double-check that MultiGet now returns details error information
- Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet
-}
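
A hedged end-to-end sketch of GetService, including the IsNotFound check used in get_test.go below; the Tweet type, index, type and ID are placeholder assumptions:

    package main

    import (
        "context"
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic"
    )

    // Tweet is a placeholder document type for this sketch.
    type Tweet struct {
        User    string `json:"user"`
        Message string `json:"message"`
    }

    func main() {
        client, err := elastic.NewClient()
        if err != nil {
            panic(err)
        }
        res, err := client.Get().Index("twitter").Type("doc").Id("1").Do(context.Background())
        if elastic.IsNotFound(err) {
            fmt.Println("document not found")
            return
        }
        if err != nil {
            panic(err)
        }
        var t Tweet
        if err := json.Unmarshal(*res.Source, &t); err != nil {
            panic(err)
        }
        fmt.Printf("tweet by %s: %s\n", t.User, t.Message)
    }
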
diff --git a/vendor/github.com/olivere/elastic/get_test.go b/vendor/github.com/olivere/elastic/get_test.go
deleted file mode 100644
index f9504bdbf..000000000
--- a/vendor/github.com/olivere/elastic/get_test.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "testing"
-)
-
-func TestGet(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Get document 1
- res, err := client.Get().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Found != true {
- t.Errorf("expected Found = true; got %v", res.Found)
- }
- if res.Source == nil {
- t.Errorf("expected Source != nil; got %v", res.Source)
- }
-
- // Get non existent document 99
- res, err = client.Get().Index(testIndexName).Type("doc").Id("99").Do(context.TODO())
- if err == nil {
- t.Fatalf("expected error; got: %v", err)
- }
- if !IsNotFound(err) {
- t.Errorf("expected NotFound error; got: %v", err)
- }
- if res != nil {
- t.Errorf("expected no response; got: %v", res)
- }
-}
-
-func TestGetWithSourceFiltering(t *testing.T) {
- client := setupTestClientAndCreateIndex(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Get document 1, without source
- res, err := client.Get().Index(testIndexName).Type("doc").Id("1").FetchSource(false).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Found != true {
- t.Errorf("expected Found = true; got %v", res.Found)
- }
- if res.Source != nil {
- t.Errorf("expected Source == nil; got %v", res.Source)
- }
-
- // Get document 1, exclude Message field
- fsc := NewFetchSourceContext(true).Exclude("message")
- res, err = client.Get().Index(testIndexName).Type("doc").Id("1").FetchSourceContext(fsc).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Found != true {
- t.Errorf("expected Found = true; got %v", res.Found)
- }
- if res.Source == nil {
- t.Errorf("expected Source != nil; got %v", res.Source)
- }
- var tw tweet
- err = json.Unmarshal(*res.Source, &tw)
- if err != nil {
- t.Fatal(err)
- }
- if tw.User != "olivere" {
- t.Errorf("expected user %q; got: %q", "olivere", tw.User)
- }
- if tw.Message != "" {
- t.Errorf("expected message %q; got: %q", "", tw.Message)
- }
-}
-
-func TestGetWithFields(t *testing.T) {
- client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Get document 1, specifying fields
- res, err := client.Get().Index(testIndexName).Type("doc").Id("1").StoredFields("message").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Found != true {
- t.Errorf("expected Found = true; got: %v", res.Found)
- }
-
- // We must NOT have the "user" field
- _, ok := res.Fields["user"]
- if ok {
- t.Fatalf("expected no field %q in document", "user")
- }
-
- // We must have the "message" field
- messageField, ok := res.Fields["message"]
- if !ok {
- t.Fatalf("expected field %q in document", "message")
- }
-
- // Depending on the version of elasticsearch the message field will be returned
- // as a string or a slice of strings. This test works in both cases.
-
- messageString, ok := messageField.(string)
- if !ok {
- messageArray, ok := messageField.([]interface{})
- if !ok {
- t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField)
- } else {
- messageString, ok = messageArray[0].(string)
- if !ok {
- t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField)
- }
- }
- }
-
- if messageString != tweet1.Message {
- t.Errorf("expected message %q; got: %q", tweet1.Message, messageString)
- }
-}
-
-func TestGetValidate(t *testing.T) {
- // Mitigate against http://stackoverflow.com/questions/27491738/elasticsearch-go-index-failures-no-feature-for-name
- client := setupTestClientAndCreateIndex(t)
-
- if _, err := client.Get().Do(context.TODO()); err == nil {
- t.Fatal("expected Get to fail")
- }
- if _, err := client.Get().Index(testIndexName).Do(context.TODO()); err == nil {
- t.Fatal("expected Get to fail")
- }
- if _, err := client.Get().Type("doc").Do(context.TODO()); err == nil {
- t.Fatal("expected Get to fail")
- }
- if _, err := client.Get().Id("1").Do(context.TODO()); err == nil {
- t.Fatal("expected Get to fail")
- }
- if _, err := client.Get().Index(testIndexName).Type("doc").Do(context.TODO()); err == nil {
- t.Fatal("expected Get to fail")
- }
- if _, err := client.Get().Type("doc").Id("1").Do(context.TODO()); err == nil {
- t.Fatal("expected Get to fail")
- }
-}
diff --git a/vendor/github.com/olivere/elastic/highlight.go b/vendor/github.com/olivere/elastic/highlight.go
deleted file mode 100644
index 6d8d2ba63..000000000
--- a/vendor/github.com/olivere/elastic/highlight.go
+++ /dev/null
@@ -1,469 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// Highlight allows highlighting search results on one or more fields.
-// For details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-highlighting.html
-type Highlight struct {
- fields []*HighlighterField
- tagsSchema *string
- highlightFilter *bool
- fragmentSize *int
- numOfFragments *int
- preTags []string
- postTags []string
- order *string
- encoder *string
- requireFieldMatch *bool
- boundaryMaxScan *int
- boundaryChars *string
- boundaryScannerType *string
- boundaryScannerLocale *string
- highlighterType *string
- fragmenter *string
- highlightQuery Query
- noMatchSize *int
- phraseLimit *int
- options map[string]interface{}
- forceSource *bool
- useExplicitFieldOrder bool
-}
-
-func NewHighlight() *Highlight {
- hl := &Highlight{
- options: make(map[string]interface{}),
- }
- return hl
-}
-
-func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight {
- hl.fields = append(hl.fields, fields...)
- return hl
-}
-
-func (hl *Highlight) Field(name string) *Highlight {
- field := NewHighlighterField(name)
- hl.fields = append(hl.fields, field)
- return hl
-}
-
-func (hl *Highlight) TagsSchema(schemaName string) *Highlight {
- hl.tagsSchema = &schemaName
- return hl
-}
-
-func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight {
- hl.highlightFilter = &highlightFilter
- return hl
-}
-
-func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight {
- hl.fragmentSize = &fragmentSize
- return hl
-}
-
-func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight {
- hl.numOfFragments = &numOfFragments
- return hl
-}
-
-func (hl *Highlight) Encoder(encoder string) *Highlight {
- hl.encoder = &encoder
- return hl
-}
-
-func (hl *Highlight) PreTags(preTags ...string) *Highlight {
- hl.preTags = append(hl.preTags, preTags...)
- return hl
-}
-
-func (hl *Highlight) PostTags(postTags ...string) *Highlight {
- hl.postTags = append(hl.postTags, postTags...)
- return hl
-}
-
-func (hl *Highlight) Order(order string) *Highlight {
- hl.order = &order
- return hl
-}
-
-func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight {
- hl.requireFieldMatch = &requireFieldMatch
- return hl
-}
-
-func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
- hl.boundaryMaxScan = &boundaryMaxScan
- return hl
-}
-
-func (hl *Highlight) BoundaryChars(boundaryChars string) *Highlight {
- hl.boundaryChars = &boundaryChars
- return hl
-}
-
-func (hl *Highlight) BoundaryScannerType(boundaryScannerType string) *Highlight {
- hl.boundaryScannerType = &boundaryScannerType
- return hl
-}
-
-func (hl *Highlight) BoundaryScannerLocale(boundaryScannerLocale string) *Highlight {
- hl.boundaryScannerLocale = &boundaryScannerLocale
- return hl
-}
-
-func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
- hl.highlighterType = &highlighterType
- return hl
-}
-
-func (hl *Highlight) Fragmenter(fragmenter string) *Highlight {
- hl.fragmenter = &fragmenter
- return hl
-}
-
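-// HighlighQuery sets a separate query to be used for highlighting. (The
-// missing "t" in the method name is part of the existing API surface.)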
-func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight {
- hl.highlightQuery = highlightQuery
- return hl
-}
-
-func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight {
- hl.noMatchSize = &noMatchSize
- return hl
-}
-
-func (hl *Highlight) Options(options map[string]interface{}) *Highlight {
- hl.options = options
- return hl
-}
-
-func (hl *Highlight) ForceSource(forceSource bool) *Highlight {
- hl.forceSource = &forceSource
- return hl
-}
-
-func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight {
- hl.useExplicitFieldOrder = useExplicitFieldOrder
- return hl
-}
-
-// Creates the query source for the bool query.
-func (hl *Highlight) Source() (interface{}, error) {
- // Returns the map inside of "highlight":
- // "highlight":{
- // ... this ...
- // }
- source := make(map[string]interface{})
- if hl.tagsSchema != nil {
- source["tags_schema"] = *hl.tagsSchema
- }
- if hl.preTags != nil && len(hl.preTags) > 0 {
- source["pre_tags"] = hl.preTags
- }
- if hl.postTags != nil && len(hl.postTags) > 0 {
- source["post_tags"] = hl.postTags
- }
- if hl.order != nil {
- source["order"] = *hl.order
- }
- if hl.highlightFilter != nil {
- source["highlight_filter"] = *hl.highlightFilter
- }
- if hl.fragmentSize != nil {
- source["fragment_size"] = *hl.fragmentSize
- }
- if hl.numOfFragments != nil {
- source["number_of_fragments"] = *hl.numOfFragments
- }
- if hl.encoder != nil {
- source["encoder"] = *hl.encoder
- }
- if hl.requireFieldMatch != nil {
- source["require_field_match"] = *hl.requireFieldMatch
- }
- if hl.boundaryMaxScan != nil {
- source["boundary_max_scan"] = *hl.boundaryMaxScan
- }
- if hl.boundaryChars != nil {
- source["boundary_chars"] = *hl.boundaryChars
- }
- if hl.boundaryScannerType != nil {
- source["boundary_scanner"] = *hl.boundaryScannerType
- }
- if hl.boundaryScannerLocale != nil {
- source["boundary_scanner_locale"] = *hl.boundaryScannerLocale
- }
- if hl.highlighterType != nil {
- source["type"] = *hl.highlighterType
- }
- if hl.fragmenter != nil {
- source["fragmenter"] = *hl.fragmenter
- }
- if hl.highlightQuery != nil {
- src, err := hl.highlightQuery.Source()
- if err != nil {
- return nil, err
- }
- source["highlight_query"] = src
- }
- if hl.noMatchSize != nil {
- source["no_match_size"] = *hl.noMatchSize
- }
- if hl.phraseLimit != nil {
- source["phrase_limit"] = *hl.phraseLimit
- }
- if hl.options != nil && len(hl.options) > 0 {
- source["options"] = hl.options
- }
- if hl.forceSource != nil {
- source["force_source"] = *hl.forceSource
- }
-
- if hl.fields != nil && len(hl.fields) > 0 {
- if hl.useExplicitFieldOrder {
- // Use a slice for the fields
- var fields []map[string]interface{}
- for _, field := range hl.fields {
- src, err := field.Source()
- if err != nil {
- return nil, err
- }
- fmap := make(map[string]interface{})
- fmap[field.Name] = src
- fields = append(fields, fmap)
- }
- source["fields"] = fields
- } else {
- // Use a map for the fields
-			fields := make(map[string]interface{})
- for _, field := range hl.fields {
- src, err := field.Source()
- if err != nil {
- return nil, err
- }
- fields[field.Name] = src
- }
- source["fields"] = fields
- }
- }
-
- return source, nil
-}
-
-// HighlighterField specifies a highlighted field.
-type HighlighterField struct {
- Name string
-
- preTags []string
- postTags []string
- fragmentSize int
- fragmentOffset int
- numOfFragments int
- highlightFilter *bool
- order *string
- requireFieldMatch *bool
- boundaryMaxScan int
- boundaryChars []rune
- highlighterType *string
- fragmenter *string
- highlightQuery Query
- noMatchSize *int
- matchedFields []string
- phraseLimit *int
- options map[string]interface{}
- forceSource *bool
-}
-
-func NewHighlighterField(name string) *HighlighterField {
- return &HighlighterField{
- Name: name,
- preTags: make([]string, 0),
- postTags: make([]string, 0),
- fragmentSize: -1,
- fragmentOffset: -1,
- numOfFragments: -1,
- boundaryMaxScan: -1,
- boundaryChars: make([]rune, 0),
- matchedFields: make([]string, 0),
- options: make(map[string]interface{}),
- }
-}
-
-func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField {
- f.preTags = append(f.preTags, preTags...)
- return f
-}
-
-func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField {
- f.postTags = append(f.postTags, postTags...)
- return f
-}
-
-func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField {
- f.fragmentSize = fragmentSize
- return f
-}
-
-func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField {
- f.fragmentOffset = fragmentOffset
- return f
-}
-
-func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField {
- f.numOfFragments = numOfFragments
- return f
-}
-
-func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField {
- f.highlightFilter = &highlightFilter
- return f
-}
-
-func (f *HighlighterField) Order(order string) *HighlighterField {
- f.order = &order
- return f
-}
-
-func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField {
- f.requireFieldMatch = &requireFieldMatch
- return f
-}
-
-func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField {
- f.boundaryMaxScan = boundaryMaxScan
- return f
-}
-
-func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField {
- f.boundaryChars = append(f.boundaryChars, boundaryChars...)
- return f
-}
-
-func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField {
- f.highlighterType = &highlighterType
- return f
-}
-
-func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField {
- f.fragmenter = &fragmenter
- return f
-}
-
-func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField {
- f.highlightQuery = highlightQuery
- return f
-}
-
-func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField {
- f.noMatchSize = &noMatchSize
- return f
-}
-
-func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField {
- f.options = options
- return f
-}
-
-func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField {
- f.matchedFields = append(f.matchedFields, matchedFields...)
- return f
-}
-
-func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField {
- f.phraseLimit = &phraseLimit
- return f
-}
-
-func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField {
- f.forceSource = &forceSource
- return f
-}
-
-func (f *HighlighterField) Source() (interface{}, error) {
- source := make(map[string]interface{})
-
- if f.preTags != nil && len(f.preTags) > 0 {
- source["pre_tags"] = f.preTags
- }
- if f.postTags != nil && len(f.postTags) > 0 {
- source["post_tags"] = f.postTags
- }
- if f.fragmentSize != -1 {
- source["fragment_size"] = f.fragmentSize
- }
- if f.numOfFragments != -1 {
- source["number_of_fragments"] = f.numOfFragments
- }
- if f.fragmentOffset != -1 {
- source["fragment_offset"] = f.fragmentOffset
- }
- if f.highlightFilter != nil {
- source["highlight_filter"] = *f.highlightFilter
- }
- if f.order != nil {
- source["order"] = *f.order
- }
- if f.requireFieldMatch != nil {
- source["require_field_match"] = *f.requireFieldMatch
- }
- if f.boundaryMaxScan != -1 {
- source["boundary_max_scan"] = f.boundaryMaxScan
- }
- if f.boundaryChars != nil && len(f.boundaryChars) > 0 {
- source["boundary_chars"] = f.boundaryChars
- }
- if f.highlighterType != nil {
- source["type"] = *f.highlighterType
- }
- if f.fragmenter != nil {
- source["fragmenter"] = *f.fragmenter
- }
- if f.highlightQuery != nil {
- src, err := f.highlightQuery.Source()
- if err != nil {
- return nil, err
- }
- source["highlight_query"] = src
- }
- if f.noMatchSize != nil {
- source["no_match_size"] = *f.noMatchSize
- }
- if f.matchedFields != nil && len(f.matchedFields) > 0 {
- source["matched_fields"] = f.matchedFields
- }
- if f.phraseLimit != nil {
- source["phrase_limit"] = *f.phraseLimit
- }
- if f.options != nil && len(f.options) > 0 {
- source["options"] = f.options
- }
- if f.forceSource != nil {
- source["force_source"] = *f.forceSource
- }
-
- return source, nil
-}
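A minimal usage sketch of the highlight builders removed above. It exercises only methods visible in the deleted highlight.go and assumes the package is importable as github.com/olivere/elastic, as the vendor path suggests; field and option names are otherwise illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Per-field options; every builder method used here appears in the deleted source above.
	field := elastic.NewHighlighterField("message").
		FragmentSize(150).
		NumOfFragments(3).
		Order("score")

	// Wrap the field in a Highlight, which emits it under "fields".
	hl := elastic.NewHighlight().Fields(field)

	src, err := hl.Source()
	if err != nil {
		panic(err)
	}
	out, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	// Should print (encoding/json sorts map keys):
	// {"fields":{"message":{"fragment_size":150,"number_of_fragments":3,"order":"score"}}}
	fmt.Println(string(out))
}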
diff --git a/vendor/github.com/olivere/elastic/highlight_test.go b/vendor/github.com/olivere/elastic/highlight_test.go
deleted file mode 100644
index c7b972c44..000000000
--- a/vendor/github.com/olivere/elastic/highlight_test.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "testing"
-)
-
-func TestHighlighterField(t *testing.T) {
- field := NewHighlighterField("grade")
- src, err := field.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestHighlighterFieldWithOptions(t *testing.T) {
- field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1)
- src, err := field.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"fragment_size":2,"number_of_fragments":1}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestHighlightWithStringField(t *testing.T) {
- builder := NewHighlight().Field("grade")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"fields":{"grade":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestHighlightWithFields(t *testing.T) {
- gradeField := NewHighlighterField("grade")
- builder := NewHighlight().Fields(gradeField)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"fields":{"grade":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestHighlightWithMultipleFields(t *testing.T) {
- gradeField := NewHighlighterField("grade")
- colorField := NewHighlighterField("color")
- builder := NewHighlight().Fields(gradeField, colorField)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"fields":{"color":{},"grade":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestHighlighterWithExplicitFieldOrder(t *testing.T) {
- gradeField := NewHighlighterField("grade").FragmentSize(2)
- colorField := NewHighlighterField("color").FragmentSize(2).NumOfFragments(1)
- builder := NewHighlight().Fields(gradeField, colorField).UseExplicitFieldOrder(true)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"fields":[{"grade":{"fragment_size":2}},{"color":{"fragment_size":2,"number_of_fragments":1}}]}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestHighlightWithBoundarySettings(t *testing.T) {
- builder := NewHighlight().
- BoundaryChars(" \t\r").
- BoundaryScannerType("word")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"boundary_chars":" \t\r","boundary_scanner":"word"}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestHighlightWithTermQuery(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Specify highlighter
- hl := NewHighlight()
- hl = hl.Fields(NewHighlighterField("message"))
- hl = hl.PreTags("<em>").PostTags("</em>")
-
-	// The prefix query below should match only the first tweet
- query := NewPrefixQuery("message", "golang")
- searchResult, err := client.Search().
- Index(testIndexName).
- Highlight(hl).
- Query(query).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Fatalf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 1 {
- t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
- }
- if len(searchResult.Hits.Hits) != 1 {
- t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
- }
-
- hit := searchResult.Hits.Hits[0]
- var tw tweet
- if err := json.Unmarshal(*hit.Source, &tw); err != nil {
- t.Fatal(err)
- }
- if hit.Highlight == nil || len(hit.Highlight) == 0 {
- t.Fatal("expected hit to have a highlight; got nil")
- }
- if hl, found := hit.Highlight["message"]; found {
- if len(hl) != 1 {
- t.Fatalf("expected to have one highlight for field \"message\"; got %d", len(hl))
- }
- expected := "Welcome to <em>Golang</em> and Elasticsearch."
- if hl[0] != expected {
- t.Errorf("expected to have highlight \"%s\"; got \"%s\"", expected, hl[0])
- }
- } else {
- t.Fatal("expected to have a highlight on field \"message\"; got none")
- }
-}
diff --git a/vendor/github.com/olivere/elastic/index.go b/vendor/github.com/olivere/elastic/index.go
deleted file mode 100644
index 4a4c3278e..000000000
--- a/vendor/github.com/olivere/elastic/index.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndexService adds or updates a typed JSON document in a specified index,
-// making it searchable.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html
-// for details.
-type IndexService struct {
- client *Client
- pretty bool
- id string
- index string
- typ string
- parent string
- routing string
- timeout string
- timestamp string
- ttl string
- version interface{}
- opType string
- versionType string
- refresh string
- waitForActiveShards string
- pipeline string
- bodyJson interface{}
- bodyString string
-}
-
-// NewIndexService creates a new IndexService.
-func NewIndexService(client *Client) *IndexService {
- return &IndexService{
- client: client,
- }
-}
-
-// Id is the document ID.
-func (s *IndexService) Id(id string) *IndexService {
- s.id = id
- return s
-}
-
-// Index is the name of the index.
-func (s *IndexService) Index(index string) *IndexService {
- s.index = index
- return s
-}
-
-// Type is the type of the document.
-func (s *IndexService) Type(typ string) *IndexService {
- s.typ = typ
- return s
-}
-
-// WaitForActiveShards sets the number of shard copies that must be active
-// before proceeding with the index operation. Defaults to 1, meaning the
-// primary shard only. Set to `all` for all shard copies, otherwise set to
-// any non-negative value less than or equal to the total number of copies
-// for the shard (number of replicas + 1).
-func (s *IndexService) WaitForActiveShards(waitForActiveShards string) *IndexService {
- s.waitForActiveShards = waitForActiveShards
- return s
-}
-
-// Pipeline specifies the pipeline id to preprocess incoming documents with.
-func (s *IndexService) Pipeline(pipeline string) *IndexService {
- s.pipeline = pipeline
- return s
-}
-
-// Refresh the index after performing the operation.
-func (s *IndexService) Refresh(refresh string) *IndexService {
- s.refresh = refresh
- return s
-}
-
-// Ttl is an expiration time for the document.
-func (s *IndexService) Ttl(ttl string) *IndexService {
- s.ttl = ttl
- return s
-}
-
-// TTL is an expiration time for the document (alias for Ttl).
-func (s *IndexService) TTL(ttl string) *IndexService {
- s.ttl = ttl
- return s
-}
-
-// Version is an explicit version number for concurrency control.
-func (s *IndexService) Version(version interface{}) *IndexService {
- s.version = version
- return s
-}
-
-// OpType is an explicit operation type, i.e. "create" or "index" (default).
-func (s *IndexService) OpType(opType string) *IndexService {
- s.opType = opType
- return s
-}
-
-// Parent is the ID of the parent document.
-func (s *IndexService) Parent(parent string) *IndexService {
- s.parent = parent
- return s
-}
-
-// Routing is a specific routing value.
-func (s *IndexService) Routing(routing string) *IndexService {
- s.routing = routing
- return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *IndexService) Timeout(timeout string) *IndexService {
- s.timeout = timeout
- return s
-}
-
-// Timestamp is an explicit timestamp for the document.
-func (s *IndexService) Timestamp(timestamp string) *IndexService {
- s.timestamp = timestamp
- return s
-}
-
-// VersionType is a specific version type.
-func (s *IndexService) VersionType(versionType string) *IndexService {
- s.versionType = versionType
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndexService) Pretty(pretty bool) *IndexService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson is the document as a serializable JSON interface.
-func (s *IndexService) BodyJson(body interface{}) *IndexService {
- s.bodyJson = body
- return s
-}
-
-// BodyString is the document encoded as a string.
-func (s *IndexService) BodyString(body string) *IndexService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndexService) buildURL() (string, string, url.Values, error) {
- var err error
- var method, path string
-
- if s.id != "" {
- // Create document with manual id
- method = "PUT"
- path, err = uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
- "id": s.id,
- "index": s.index,
- "type": s.typ,
- })
- } else {
- // Automatic ID generation
- // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html#index-creation
- method = "POST"
- path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{
- "index": s.index,
- "type": s.typ,
- })
- }
- if err != nil {
- return "", "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.waitForActiveShards != "" {
- params.Set("wait_for_active_shards", s.waitForActiveShards)
- }
- if s.refresh != "" {
- params.Set("refresh", s.refresh)
- }
- if s.opType != "" {
- params.Set("op_type", s.opType)
- }
- if s.parent != "" {
- params.Set("parent", s.parent)
- }
- if s.pipeline != "" {
- params.Set("pipeline", s.pipeline)
- }
- if s.routing != "" {
- params.Set("routing", s.routing)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.timestamp != "" {
- params.Set("timestamp", s.timestamp)
- }
- if s.ttl != "" {
- params.Set("ttl", s.ttl)
- }
- if s.version != nil {
- params.Set("version", fmt.Sprintf("%v", s.version))
- }
- if s.versionType != "" {
- params.Set("version_type", s.versionType)
- }
- return method, path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndexService) Validate() error {
- var invalid []string
- if s.index == "" {
- invalid = append(invalid, "Index")
- }
- if s.typ == "" {
- invalid = append(invalid, "Type")
- }
- if s.bodyString == "" && s.bodyJson == nil {
- invalid = append(invalid, "BodyJson")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndexService) Do(ctx context.Context) (*IndexResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- method, path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: method,
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndexResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndexResponse is the result of indexing a document in Elasticsearch.
-type IndexResponse struct {
- Index string `json:"_index,omitempty"`
- Type string `json:"_type,omitempty"`
- Id string `json:"_id,omitempty"`
- Version int64 `json:"_version,omitempty"`
- Result string `json:"result,omitempty"`
- Shards *shardsInfo `json:"_shards,omitempty"`
- SeqNo int64 `json:"_seq_no,omitempty"`
- PrimaryTerm int64 `json:"_primary_term,omitempty"`
- Status int `json:"status,omitempty"`
- ForcedRefresh bool `json:"forced_refresh,omitempty"`
-}
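As a usage sketch of the IndexService removed above: it assumes a reachable Elasticsearch 6.x node on the default http://127.0.0.1:9200 and uses only builder methods and response fields shown in this change; the tweet type and the "tweets" index name are illustrative. NewClient comes from client.go, which is part of the same vendored package.

package main

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// tweet is a hypothetical document type used only for this sketch.
type tweet struct {
	User    string `json:"user"`
	Message string `json:"message"`
}

func main() {
	// Default options assume a single node at http://127.0.0.1:9200.
	client, err := elastic.NewClient()
	if err != nil {
		panic(err)
	}

	// An explicit Id issues PUT /{index}/{type}/{id}; omitting Id() falls back
	// to POST /{index}/{type}/ with automatic ID generation, as buildURL above distinguishes.
	res, err := client.Index().
		Index("tweets").
		Type("doc").
		Id("1").
		BodyJson(tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}).
		Do(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Printf("indexed %s/%s/%s (version %d, result %q)\n",
		res.Index, res.Type, res.Id, res.Version, res.Result)
}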
diff --git a/vendor/github.com/olivere/elastic/index_test.go b/vendor/github.com/olivere/elastic/index_test.go
deleted file mode 100644
index 1a0c38576..000000000
--- a/vendor/github.com/olivere/elastic/index_test.go
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "testing"
-)
-
-func TestIndexLifecycle(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
-
- // Add a document
- indexResult, err := client.Index().
- Index(testIndexName).
- Type("doc").
- Id("1").
- BodyJson(&tweet1).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if indexResult == nil {
- t.Errorf("expected result to be != nil; got: %v", indexResult)
- }
-
- // Exists
- exists, err := client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !exists {
- t.Errorf("expected exists %v; got %v", true, exists)
- }
-
- // Get document
- getResult, err := client.Get().
- Index(testIndexName).
- Type("doc").
- Id("1").
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if getResult.Index != testIndexName {
- t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
- }
- if getResult.Type != "doc" {
- t.Errorf("expected GetResult.Type %q; got %q", "doc", getResult.Type)
- }
- if getResult.Id != "1" {
- t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id)
- }
- if getResult.Source == nil {
- t.Errorf("expected GetResult.Source to be != nil; got nil")
- }
-
- // Decode the Source field
- var tweetGot tweet
- err = json.Unmarshal(*getResult.Source, &tweetGot)
- if err != nil {
- t.Fatal(err)
- }
- if tweetGot.User != tweet1.User {
- t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
- }
- if tweetGot.Message != tweet1.Message {
- t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
- }
-
- // Delete document again
- deleteResult, err := client.Delete().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if deleteResult == nil {
- t.Errorf("expected result to be != nil; got: %v", deleteResult)
- }
-
- // Exists
- exists, err = client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if exists {
- t.Errorf("expected exists %v; got %v", false, exists)
- }
-}
-
-func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
-
- // Add a document
- indexResult, err := client.Index().
- Index(testIndexName).
- Type("doc").
- BodyJson(&tweet1).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if indexResult == nil {
- t.Errorf("expected result to be != nil; got: %v", indexResult)
- }
- if indexResult.Id == "" {
-		t.Fatalf("expected Elasticsearch to generate an automatic ID, got: %v", indexResult.Id)
- }
- id := indexResult.Id
-
- // Exists
- exists, err := client.Exists().Index(testIndexName).Type("doc").Id(id).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !exists {
- t.Errorf("expected exists %v; got %v", true, exists)
- }
-
- // Get document
- getResult, err := client.Get().
- Index(testIndexName).
- Type("doc").
- Id(id).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if getResult.Index != testIndexName {
- t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
- }
- if getResult.Type != "doc" {
- t.Errorf("expected GetResult.Type %q; got %q", "doc", getResult.Type)
- }
- if getResult.Id != id {
- t.Errorf("expected GetResult.Id %q; got %q", id, getResult.Id)
- }
- if getResult.Source == nil {
- t.Errorf("expected GetResult.Source to be != nil; got nil")
- }
-
- // Decode the Source field
- var tweetGot tweet
- err = json.Unmarshal(*getResult.Source, &tweetGot)
- if err != nil {
- t.Fatal(err)
- }
- if tweetGot.User != tweet1.User {
- t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
- }
- if tweetGot.Message != tweet1.Message {
- t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
- }
-
- // Delete document again
- deleteResult, err := client.Delete().Index(testIndexName).Type("doc").Id(id).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if deleteResult == nil {
- t.Errorf("expected result to be != nil; got: %v", deleteResult)
- }
-
- // Exists
- exists, err = client.Exists().Index(testIndexName).Type("doc").Id(id).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if exists {
- t.Errorf("expected exists %v; got %v", false, exists)
- }
-}
-
-func TestIndexValidate(t *testing.T) {
- client := setupTestClient(t)
-
- tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
-
- // No index name -> fail with error
- res, err := NewIndexService(client).Type("doc").Id("1").BodyJson(&tweet).Do(context.TODO())
- if err == nil {
- t.Fatalf("expected Index to fail without index name")
- }
- if res != nil {
- t.Fatalf("expected result to be == nil; got: %v", res)
- }
-
-	// No type -> fail with error
- res, err = NewIndexService(client).Index(testIndexName).Id("1").BodyJson(&tweet).Do(context.TODO())
- if err == nil {
- t.Fatalf("expected Index to fail without type")
- }
- if res != nil {
- t.Fatalf("expected result to be == nil; got: %v", res)
- }
-}
-
-func TestIndexCreateExistsOpenCloseDelete(t *testing.T) {
-	// TODO: Find out how to make these tests robust
- t.Skip("test fails regularly with 409 (Conflict): " +
- "IndexPrimaryShardNotAllocatedException[[elastic-test] " +
- "primary not allocated post api... skipping")
-
- client := setupTestClient(t)
-
- // Create index
- createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if createIndex == nil {
- t.Fatalf("expected response; got: %v", createIndex)
- }
- if !createIndex.Acknowledged {
- t.Errorf("expected ack for creating index; got: %v", createIndex.Acknowledged)
- }
-
- // Exists
- indexExists, err := client.IndexExists(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !indexExists {
- t.Fatalf("expected index exists=%v; got %v", true, indexExists)
- }
-
- // Flush
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Close index
- closeIndex, err := client.CloseIndex(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if closeIndex == nil {
- t.Fatalf("expected response; got: %v", closeIndex)
- }
- if !closeIndex.Acknowledged {
- t.Errorf("expected ack for closing index; got: %v", closeIndex.Acknowledged)
- }
-
- // Open index
- openIndex, err := client.OpenIndex(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if openIndex == nil {
- t.Fatalf("expected response; got: %v", openIndex)
- }
- if !openIndex.Acknowledged {
- t.Errorf("expected ack for opening index; got: %v", openIndex.Acknowledged)
- }
-
- // Flush
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Delete index
- deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if deleteIndex == nil {
- t.Fatalf("expected response; got: %v", deleteIndex)
- }
- if !deleteIndex.Acknowledged {
- t.Errorf("expected ack for deleting index; got %v", deleteIndex.Acknowledged)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_analyze.go b/vendor/github.com/olivere/elastic/indices_analyze.go
deleted file mode 100644
index fb3a91234..000000000
--- a/vendor/github.com/olivere/elastic/indices_analyze.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesAnalyzeService performs the analysis process on a text and returns
-// the tokens breakdown of the text.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-analyze.html
-// for detail.
-type IndicesAnalyzeService struct {
- client *Client
- pretty bool
- index string
- request *IndicesAnalyzeRequest
- format string
- preferLocal *bool
- bodyJson interface{}
- bodyString string
-}
-
-// NewIndicesAnalyzeService creates a new IndicesAnalyzeService.
-func NewIndicesAnalyzeService(client *Client) *IndicesAnalyzeService {
- return &IndicesAnalyzeService{
- client: client,
- request: new(IndicesAnalyzeRequest),
- }
-}
-
-// Index is the name of the index to scope the operation.
-func (s *IndicesAnalyzeService) Index(index string) *IndicesAnalyzeService {
- s.index = index
- return s
-}
-
-// Format of the output.
-func (s *IndicesAnalyzeService) Format(format string) *IndicesAnalyzeService {
- s.format = format
- return s
-}
-
-// PreferLocal, when true, specifies that a local shard should be used
-// if available. When false, a random shard is used (default: true).
-func (s *IndicesAnalyzeService) PreferLocal(preferLocal bool) *IndicesAnalyzeService {
- s.preferLocal = &preferLocal
- return s
-}
-
-// Request passes the analyze request to use.
-func (s *IndicesAnalyzeService) Request(request *IndicesAnalyzeRequest) *IndicesAnalyzeService {
- if request == nil {
- s.request = new(IndicesAnalyzeRequest)
- } else {
- s.request = request
- }
- return s
-}
-
-// Analyzer is the name of the analyzer to use.
-func (s *IndicesAnalyzeService) Analyzer(analyzer string) *IndicesAnalyzeService {
- s.request.Analyzer = analyzer
- return s
-}
-
-// Attributes is a list of token attributes to output; this parameter works
-// only with explain=true.
-func (s *IndicesAnalyzeService) Attributes(attributes ...string) *IndicesAnalyzeService {
- s.request.Attributes = attributes
- return s
-}
-
-// CharFilter is a list of character filters to use for the analysis.
-func (s *IndicesAnalyzeService) CharFilter(charFilter ...string) *IndicesAnalyzeService {
- s.request.CharFilter = charFilter
- return s
-}
-
-// Explain, when true, outputs more advanced details (default: false).
-func (s *IndicesAnalyzeService) Explain(explain bool) *IndicesAnalyzeService {
- s.request.Explain = explain
- return s
-}
-
-// Field tells Elasticsearch to use the analyzer configured for this field (instead of passing an analyzer name).
-func (s *IndicesAnalyzeService) Field(field string) *IndicesAnalyzeService {
- s.request.Field = field
- return s
-}
-
-// Filter is a list of filters to use for the analysis.
-func (s *IndicesAnalyzeService) Filter(filter ...string) *IndicesAnalyzeService {
- s.request.Filter = filter
- return s
-}
-
-// Text is the text on which the analysis should be performed (when request body is not used).
-func (s *IndicesAnalyzeService) Text(text ...string) *IndicesAnalyzeService {
- s.request.Text = text
- return s
-}
-
-// Tokenizer is the name of the tokenizer to use for the analysis.
-func (s *IndicesAnalyzeService) Tokenizer(tokenizer string) *IndicesAnalyzeService {
- s.request.Tokenizer = tokenizer
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesAnalyzeService) Pretty(pretty bool) *IndicesAnalyzeService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson is the text on which the analysis should be performed.
-func (s *IndicesAnalyzeService) BodyJson(body interface{}) *IndicesAnalyzeService {
- s.bodyJson = body
- return s
-}
-
-// BodyString is the text on which the analysis should be performed.
-func (s *IndicesAnalyzeService) BodyString(body string) *IndicesAnalyzeService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesAnalyzeService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
-
- if s.index == "" {
- path = "/_analyze"
- } else {
- path, err = uritemplates.Expand("/{index}/_analyze", map[string]string{
- "index": s.index,
- })
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.format != "" {
- params.Set("format", s.format)
- }
- if s.preferLocal != nil {
- params.Set("prefer_local", fmt.Sprintf("%v", *s.preferLocal))
- }
-
- return path, params, nil
-}
-
-// Do will execute the request with the given context.
-func (s *IndicesAnalyzeService) Do(ctx context.Context) (*IndicesAnalyzeResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else if s.bodyString != "" {
- body = s.bodyString
- } else {
- // Request parameters are deprecated in 5.1.1, and we must use a JSON
- // structure in the body to pass the parameters.
- // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-analyze.html
- body = s.request
- }
-
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- ret := new(IndicesAnalyzeResponse)
- if err = s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
-
- return ret, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesAnalyzeService) Validate() error {
- var invalid []string
- if s.bodyJson == nil && s.bodyString == "" {
- if len(s.request.Text) == 0 {
- invalid = append(invalid, "Text")
- }
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// IndicesAnalyzeRequest specifies the parameters of the analyze request.
-type IndicesAnalyzeRequest struct {
- Text []string `json:"text,omitempty"`
- Analyzer string `json:"analyzer,omitempty"`
- Tokenizer string `json:"tokenizer,omitempty"`
- Filter []string `json:"filter,omitempty"`
- CharFilter []string `json:"char_filter,omitempty"`
- Field string `json:"field,omitempty"`
- Explain bool `json:"explain,omitempty"`
- Attributes []string `json:"attributes,omitempty"`
-}
-
-// IndicesAnalyzeResponse is the response of IndicesAnalyzeService.Do.
-type IndicesAnalyzeResponse struct {
-	Tokens []IndicesAnalyzeResponseToken `json:"tokens"` // tokens of a regular analyze request
-	Detail IndicesAnalyzeResponseDetail  `json:"detail"` // details of an analyze request with explain=true
-}
-
-type IndicesAnalyzeResponseToken struct {
- Token string `json:"token"`
- StartOffset int `json:"start_offset"`
- EndOffset int `json:"end_offset"`
- Type string `json:"type"`
- Position int `json:"position"`
-}
-
-type IndicesAnalyzeResponseDetail struct {
- CustomAnalyzer bool `json:"custom_analyzer"`
- Charfilters []interface{} `json:"charfilters"`
- Analyzer struct {
- Name string `json:"name"`
- Tokens []struct {
- Token string `json:"token"`
- StartOffset int `json:"start_offset"`
- EndOffset int `json:"end_offset"`
- Type string `json:"type"`
- Position int `json:"position"`
- Bytes string `json:"bytes"`
- PositionLength int `json:"positionLength"`
- } `json:"tokens"`
- } `json:"analyzer"`
- Tokenizer struct {
- Name string `json:"name"`
- Tokens []struct {
- Token string `json:"token"`
- StartOffset int `json:"start_offset"`
- EndOffset int `json:"end_offset"`
- Type string `json:"type"`
- Position int `json:"position"`
- } `json:"tokens"`
- } `json:"tokenizer"`
- Tokenfilters []struct {
- Name string `json:"name"`
- Tokens []struct {
- Token string `json:"token"`
- StartOffset int `json:"start_offset"`
- EndOffset int `json:"end_offset"`
- Type string `json:"type"`
- Position int `json:"position"`
- Keyword bool `json:"keyword"`
- } `json:"tokens"`
- } `json:"tokenfilters"`
-}
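A short sketch of how the analyze service above could be driven, assuming a running cluster and an already constructed *elastic.Client. IndexAnalyze() is the client helper the deleted tests below call; the text and analyzer name are illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

func analyzeText(ctx context.Context, client *elastic.Client) error {
	// Analyze free text with the standard analyzer; the request body mirrors
	// IndicesAnalyzeRequest above (text, analyzer, tokenizer, filters, ...).
	res, err := client.IndexAnalyze().
		Analyzer("standard").
		Text("Welcome to Golang and Elasticsearch.").
		Do(ctx)
	if err != nil {
		return err
	}
	// Tokens carries the breakdown for a regular (non-explain) request.
	for _, tok := range res.Tokens {
		fmt.Printf("%-15s %d-%d %s\n", tok.Token, tok.StartOffset, tok.EndOffset, tok.Type)
	}
	return nil
}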
diff --git a/vendor/github.com/olivere/elastic/indices_analyze_test.go b/vendor/github.com/olivere/elastic/indices_analyze_test.go
deleted file mode 100644
index 90dbf1e73..000000000
--- a/vendor/github.com/olivere/elastic/indices_analyze_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndicesAnalyzeURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Index string
- Expected string
- }{
- {
- "",
- "/_analyze",
- },
- {
- "tweets",
- "/tweets/_analyze",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.IndexAnalyze().Index(test.Index).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
-
-func TestIndicesAnalyze(t *testing.T) {
- client := setupTestClient(t)
- // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
-
- res, err := client.IndexAnalyze().Text("hello hi guy").Do(context.TODO())
- if err != nil {
- t.Fatalf("expected no error, got %v", err)
- }
- if len(res.Tokens) != 3 {
- t.Fatalf("expected %d, got %d (%+v)", 3, len(res.Tokens), res.Tokens)
- }
-}
-
-func TestIndicesAnalyzeDetail(t *testing.T) {
- client := setupTestClient(t)
- // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
-
- res, err := client.IndexAnalyze().Text("hello hi guy").Explain(true).Do(context.TODO())
- if err != nil {
- t.Fatalf("expected no error, got %v", err)
- }
-
- if len(res.Detail.Analyzer.Tokens) != 3 {
-		t.Fatalf("expected %d tokens, got %d (%+v)", 3, len(res.Detail.Analyzer.Tokens), res.Detail.Analyzer.Tokens)
- }
-}
-
-func TestIndicesAnalyzeWithIndex(t *testing.T) {
- client := setupTestClient(t)
-
- _, err := client.IndexAnalyze().Index("foo").Text("hello hi guy").Do(context.TODO())
- if err == nil {
- t.Fatal("expected error, got nil")
- }
- if want, have := "elastic: Error 404 (Not Found): no such index [type=index_not_found_exception]", err.Error(); want != have {
- t.Fatalf("expected error %q, got %q", want, have)
- }
-}
-
-func TestIndicesAnalyzeValidate(t *testing.T) {
- client := setupTestClient(t)
-
- _, err := client.IndexAnalyze().Do(context.TODO())
- if err == nil {
- t.Fatal("expected error, got nil")
- }
- if want, have := "missing required fields: [Text]", err.Error(); want != have {
- t.Fatalf("expected error %q, got %q", want, have)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_close.go b/vendor/github.com/olivere/elastic/indices_close.go
deleted file mode 100644
index 00ecdf966..000000000
--- a/vendor/github.com/olivere/elastic/indices_close.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesCloseService closes an index.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-open-close.html
-// for details.
-type IndicesCloseService struct {
- client *Client
- pretty bool
- index string
- timeout string
- masterTimeout string
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
-}
-
-// NewIndicesCloseService creates and initializes a new IndicesCloseService.
-func NewIndicesCloseService(client *Client) *IndicesCloseService {
- return &IndicesCloseService{client: client}
-}
-
-// Index is the name of the index to close.
-func (s *IndicesCloseService) Index(index string) *IndicesCloseService {
- s.index = index
- return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *IndicesCloseService) Timeout(timeout string) *IndicesCloseService {
- s.timeout = timeout
- return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesCloseService) MasterTimeout(masterTimeout string) *IndicesCloseService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesCloseService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesCloseService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified).
-func (s *IndicesCloseService) AllowNoIndices(allowNoIndices bool) *IndicesCloseService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *IndicesCloseService) ExpandWildcards(expandWildcards string) *IndicesCloseService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesCloseService) Pretty(pretty bool) *IndicesCloseService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesCloseService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/{index}/_close", map[string]string{
- "index": s.index,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
-
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesCloseService) Validate() error {
- var invalid []string
- if s.index == "" {
- invalid = append(invalid, "Index")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesCloseService) Do(ctx context.Context) (*IndicesCloseResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndicesCloseResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesCloseResponse is the response of IndicesCloseService.Do.
-type IndicesCloseResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
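A usage sketch of the close service above, assuming a reachable cluster; CloseIndex is the client helper the deleted tests use, and the index name is illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

func closeIndex(ctx context.Context, client *elastic.Client) error {
	// The service issues POST /{index}/_close, as buildURL above shows.
	res, err := client.CloseIndex("tweets").Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("close of %q was not acknowledged", "tweets")
	}
	return nil
}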
diff --git a/vendor/github.com/olivere/elastic/indices_close_test.go b/vendor/github.com/olivere/elastic/indices_close_test.go
deleted file mode 100644
index e7a4d9e05..000000000
--- a/vendor/github.com/olivere/elastic/indices_close_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-// TODO(oe): Find out why this test fails on Travis CI.
-/*
-func TestIndicesOpenAndClose(t *testing.T) {
- client := setupTestClient(t)
-
- // Create index
- createIndex, err := client.CreateIndex(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !createIndex.Acknowledged {
- t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
- }
- defer func() {
- // Delete index
- deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !deleteIndex.Acknowledged {
- t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
- }
- }()
-
- waitForYellow := func() {
- // Wait for status yellow
- res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res != nil && res.TimedOut {
- t.Fatalf("cluster time out waiting for status %q", "yellow")
- }
- }
-
- // Wait for cluster
- waitForYellow()
-
- // Close index
- cresp, err := client.CloseIndex(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !cresp.Acknowledged {
- t.Fatalf("expected close index of %q to be acknowledged\n", testIndexName)
- }
-
- // Wait for cluster
- waitForYellow()
-
- // Open index again
- oresp, err := client.OpenIndex(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !oresp.Acknowledged {
- t.Fatalf("expected open index of %q to be acknowledged\n", testIndexName)
- }
-}
-*/
-
-func TestIndicesCloseValidate(t *testing.T) {
- client := setupTestClient(t)
-
- // No index name -> fail with error
- res, err := NewIndicesCloseService(client).Do(context.TODO())
- if err == nil {
- t.Fatalf("expected IndicesClose to fail without index name")
- }
- if res != nil {
- t.Fatalf("expected result to be == nil; got: %v", res)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_create.go b/vendor/github.com/olivere/elastic/indices_create.go
deleted file mode 100644
index 8d8e0c25e..000000000
--- a/vendor/github.com/olivere/elastic/indices_create.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "errors"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesCreateService creates a new index.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-create-index.html
-// for details.
-type IndicesCreateService struct {
- client *Client
- pretty bool
- index string
- timeout string
- masterTimeout string
- bodyJson interface{}
- bodyString string
-}
-
-// NewIndicesCreateService returns a new IndicesCreateService.
-func NewIndicesCreateService(client *Client) *IndicesCreateService {
- return &IndicesCreateService{client: client}
-}
-
-// Index is the name of the index to create.
-func (b *IndicesCreateService) Index(index string) *IndicesCreateService {
- b.index = index
- return b
-}
-
-// Timeout sets the explicit operation timeout, e.g. "5s".
-func (s *IndicesCreateService) Timeout(timeout string) *IndicesCreateService {
- s.timeout = timeout
- return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesCreateService) MasterTimeout(masterTimeout string) *IndicesCreateService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Body specifies the configuration of the index as a string.
-// It is an alias for BodyString.
-func (b *IndicesCreateService) Body(body string) *IndicesCreateService {
- b.bodyString = body
- return b
-}
-
-// BodyString specifies the configuration of the index as a string.
-func (b *IndicesCreateService) BodyString(body string) *IndicesCreateService {
- b.bodyString = body
- return b
-}
-
-// BodyJson specifies the configuration of the index. The interface{} will
-// be serialized as a JSON document, so use a map[string]interface{}.
-func (b *IndicesCreateService) BodyJson(body interface{}) *IndicesCreateService {
- b.bodyJson = body
- return b
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (b *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService {
- b.pretty = pretty
- return b
-}
-
-// Do executes the operation.
-func (b *IndicesCreateService) Do(ctx context.Context) (*IndicesCreateResult, error) {
- if b.index == "" {
- return nil, errors.New("missing index name")
- }
-
- // Build url
- path, err := uritemplates.Expand("/{index}", map[string]string{
- "index": b.index,
- })
- if err != nil {
- return nil, err
- }
-
- params := make(url.Values)
- if b.pretty {
- params.Set("pretty", "true")
- }
- if b.masterTimeout != "" {
- params.Set("master_timeout", b.masterTimeout)
- }
- if b.timeout != "" {
- params.Set("timeout", b.timeout)
- }
-
- // Setup HTTP request body
- var body interface{}
- if b.bodyJson != nil {
- body = b.bodyJson
- } else {
- body = b.bodyString
- }
-
- // Get response
- res, err := b.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "PUT",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- ret := new(IndicesCreateResult)
- if err := b.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// -- Result of a create index request.
-
-// IndicesCreateResult is the outcome of creating a new index.
-type IndicesCreateResult struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
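A sketch of creating an index with the service above, assuming a reachable cluster and an existing *elastic.Client. CreateIndex is the helper the deleted tests rely on; the settings body and index name are illustrative only.

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

func createIndex(ctx context.Context, client *elastic.Client) error {
	// A minimal settings body; real mappings would normally be included here.
	body := `{"settings":{"number_of_shards":1,"number_of_replicas":0}}`

	// The service PUTs the body to /{index}.
	res, err := client.CreateIndex("tweets").BodyString(body).Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("create of %q was not acknowledged", "tweets")
	}
	return nil
}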
diff --git a/vendor/github.com/olivere/elastic/indices_create_test.go b/vendor/github.com/olivere/elastic/indices_create_test.go
deleted file mode 100644
index f37df1c54..000000000
--- a/vendor/github.com/olivere/elastic/indices_create_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndicesLifecycle(t *testing.T) {
- client := setupTestClient(t)
-
- // Create index
- createIndex, err := client.CreateIndex(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !createIndex.Acknowledged {
- t.Errorf("expected IndicesCreateResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
- }
-
- // Check if index exists
- indexExists, err := client.IndexExists(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !indexExists {
- t.Fatalf("index %s should exist, but doesn't\n", testIndexName)
- }
-
- // Delete index
- deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !deleteIndex.Acknowledged {
- t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
- }
-
- // Check if index exists
- indexExists, err = client.IndexExists(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if indexExists {
- t.Fatalf("index %s should not exist, but does\n", testIndexName)
- }
-}
-
-func TestIndicesCreateValidate(t *testing.T) {
- client := setupTestClient(t)
-
- // No index name -> fail with error
- res, err := NewIndicesCreateService(client).Body(testMapping).Do(context.TODO())
- if err == nil {
- t.Fatalf("expected IndicesCreate to fail without index name")
- }
- if res != nil {
- t.Fatalf("expected result to be == nil; got: %v", res)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_delete.go b/vendor/github.com/olivere/elastic/indices_delete.go
deleted file mode 100644
index 2afeca978..000000000
--- a/vendor/github.com/olivere/elastic/indices_delete.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesDeleteService deletes existing indices.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-delete-index.html
-// for details.
-type IndicesDeleteService struct {
- client *Client
- pretty bool
- index []string
- timeout string
- masterTimeout string
-}
-
-// NewIndicesDeleteService creates and initializes a new IndicesDeleteService.
-func NewIndicesDeleteService(client *Client) *IndicesDeleteService {
- return &IndicesDeleteService{
- client: client,
- index: make([]string, 0),
- }
-}
-
-// Index adds the list of indices to delete.
-// Use `_all` or `*` string to delete all indices.
-func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService {
- s.index = index
- return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService {
- s.timeout = timeout
- return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesDeleteService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/{index}", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesDeleteService) Validate() error {
- var invalid []string
- if len(s.index) == 0 {
- invalid = append(invalid, "Index")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesDeleteService) Do(ctx context.Context) (*IndicesDeleteResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "DELETE",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndicesDeleteResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// -- Result of a delete index request.
-
-// IndicesDeleteResponse is the response of IndicesDeleteService.Do.
-type IndicesDeleteResponse struct {
- Acknowledged bool `json:"acknowledged"`
-}
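A sketch of deleting an index with the service above, under the same assumptions as the other sketches (reachable cluster, existing *elastic.Client, illustrative index name). DeleteIndex is the helper the deleted tests call; the service joins multiple names with commas and issues DELETE /{index}, per buildURL above.

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

func deleteIndex(ctx context.Context, client *elastic.Client) error {
	res, err := client.DeleteIndex("tweets").Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("delete of %q was not acknowledged", "tweets")
	}
	return nil
}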
diff --git a/vendor/github.com/olivere/elastic/indices_delete_template.go b/vendor/github.com/olivere/elastic/indices_delete_template.go
deleted file mode 100644
index 0ea34cf89..000000000
--- a/vendor/github.com/olivere/elastic/indices_delete_template.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesDeleteTemplateService deletes index templates.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-templates.html.
-type IndicesDeleteTemplateService struct {
- client *Client
- pretty bool
- name string
- timeout string
- masterTimeout string
-}
-
-// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService.
-func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService {
- return &IndicesDeleteTemplateService{
- client: client,
- }
-}
-
-// Name is the name of the template.
-func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService {
- s.name = name
- return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService {
- s.timeout = timeout
- return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_template/{name}", map[string]string{
- "name": s.name,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesDeleteTemplateService) Validate() error {
- var invalid []string
- if s.name == "" {
- invalid = append(invalid, "Name")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesDeleteTemplateService) Do(ctx context.Context) (*IndicesDeleteTemplateResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "DELETE",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndicesDeleteTemplateResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do.
-type IndicesDeleteTemplateResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
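A sketch of deleting an index template with the service above. It uses the exported constructor shown in this change because no Client shorthand for deleting templates is visible here; the template name is illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

func deleteTemplate(ctx context.Context, client *elastic.Client) error {
	// The service issues DELETE /_template/{name}, per buildURL above.
	res, err := elastic.NewIndicesDeleteTemplateService(client).
		Name("tweets-template").
		Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("deleting template %q was not acknowledged", "tweets-template")
	}
	return nil
}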
diff --git a/vendor/github.com/olivere/elastic/indices_delete_test.go b/vendor/github.com/olivere/elastic/indices_delete_test.go
deleted file mode 100644
index db77c7a25..000000000
--- a/vendor/github.com/olivere/elastic/indices_delete_test.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndicesDeleteValidate(t *testing.T) {
- client := setupTestClient(t)
-
- // No index name -> fail with error
- res, err := NewIndicesDeleteService(client).Do(context.TODO())
- if err == nil {
- t.Fatalf("expected IndicesDelete to fail without index name")
- }
- if res != nil {
- t.Fatalf("expected result to be == nil; got: %v", res)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_exists.go b/vendor/github.com/olivere/elastic/indices_exists.go
deleted file mode 100644
index aa9391039..000000000
--- a/vendor/github.com/olivere/elastic/indices_exists.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesExistsService checks whether an index or a set of indices exists.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-exists.html
-// for details.
-type IndicesExistsService struct {
- client *Client
- pretty bool
- index []string
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
- local *bool
-}
-
-// NewIndicesExistsService creates and initializes a new IndicesExistsService.
-func NewIndicesExistsService(client *Client) *IndicesExistsService {
- return &IndicesExistsService{
- client: client,
- index: make([]string, 0),
- }
-}
-
-// Index is a list of one or more indices to check.
-func (s *IndicesExistsService) Index(index []string) *IndicesExistsService {
- s.index = index
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices expression
-// resolves into no concrete indices. (This includes `_all` string or
-// when no indices have been specified).
-func (s *IndicesExistsService) AllowNoIndices(allowNoIndices bool) *IndicesExistsService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *IndicesExistsService) ExpandWildcards(expandWildcards string) *IndicesExistsService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// Local, when set, returns local information and does not retrieve the state
-// from the master node (default: false).
-func (s *IndicesExistsService) Local(local bool) *IndicesExistsService {
- s.local = &local
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesExistsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// Pretty indicates that the JSON response should be indented and human readable.
-func (s *IndicesExistsService) Pretty(pretty bool) *IndicesExistsService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesExistsService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/{index}", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.local != nil {
- params.Set("local", fmt.Sprintf("%v", *s.local))
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesExistsService) Validate() error {
- var invalid []string
- if len(s.index) == 0 {
- invalid = append(invalid, "Index")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesExistsService) Do(ctx context.Context) (bool, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return false, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return false, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "HEAD",
- Path: path,
- Params: params,
- IgnoreErrors: []int{404},
- })
- if err != nil {
- return false, err
- }
-
- // Return operation response
- switch res.StatusCode {
- case http.StatusOK:
- return true, nil
- case http.StatusNotFound:
- return false, nil
- default:
- return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
- }
-}
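
A minimal usage sketch for the existence check removed above, under assumed context: a configured *elastic.Client named client, the usual context import, and a hypothetical index name. Index takes the full slice of names, and Do maps the HEAD /{index} response to a boolean (200 -> true, 404 -> false):

    func indexExists(ctx context.Context, client *elastic.Client) (bool, error) {
        // Issues HEAD /twitter; any status other than 200/404 surfaces as an error.
        return elastic.NewIndicesExistsService(client).
            Index([]string{"twitter"}). // hypothetical index name
            Do(ctx)
    }
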
diff --git a/vendor/github.com/olivere/elastic/indices_exists_template.go b/vendor/github.com/olivere/elastic/indices_exists_template.go
deleted file mode 100644
index 40b06e895..000000000
--- a/vendor/github.com/olivere/elastic/indices_exists_template.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesExistsTemplateService checks if a given template exists.
-// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html#indices-templates-exists
-// for documentation.
-type IndicesExistsTemplateService struct {
- client *Client
- pretty bool
- name string
- local *bool
-}
-
-// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService.
-func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService {
- return &IndicesExistsTemplateService{
- client: client,
- }
-}
-
-// Name is the name of the template.
-func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService {
- s.name = name
- return s
-}
-
-// Local indicates whether to return local information, i.e. do not retrieve
-// the state from the master node (default: false).
-func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService {
- s.local = &local
- return s
-}
-
-// Pretty indicates that the JSON response should be indented and human readable.
-func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_template/{name}", map[string]string{
- "name": s.name,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.local != nil {
- params.Set("local", fmt.Sprintf("%v", *s.local))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesExistsTemplateService) Validate() error {
- var invalid []string
- if s.name == "" {
- invalid = append(invalid, "Name")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesExistsTemplateService) Do(ctx context.Context) (bool, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return false, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return false, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "HEAD",
- Path: path,
- Params: params,
- IgnoreErrors: []int{404},
- })
- if err != nil {
- return false, err
- }
-
- // Return operation response
- switch res.StatusCode {
- case http.StatusOK:
- return true, nil
- case http.StatusNotFound:
- return false, nil
- default:
- return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_exists_template_test.go b/vendor/github.com/olivere/elastic/indices_exists_template_test.go
deleted file mode 100644
index a97442971..000000000
--- a/vendor/github.com/olivere/elastic/indices_exists_template_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndexExistsTemplate(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tmpl := `{
- "index_patterns":["elastic-test*"],
- "settings":{
- "number_of_shards":1,
- "number_of_replicas":0
- },
- "mappings":{
- "doc":{
- "properties":{
- "tags":{
- "type":"keyword"
- },
- "location":{
- "type":"geo_point"
- },
- "suggest_field":{
- "type":"completion"
- }
- }
- }
- }
-}`
- putres, err := client.IndexPutTemplate("elastic-template").BodyString(tmpl).Do(context.TODO())
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if putres == nil {
- t.Fatalf("expected response; got: %v", putres)
- }
- if !putres.Acknowledged {
- t.Fatalf("expected index template to be ack'd; got: %v", putres.Acknowledged)
- }
-
- // Always delete template
- defer client.IndexDeleteTemplate("elastic-template").Do(context.TODO())
-
- // Check if template exists
- exists, err := client.IndexTemplateExists("elastic-template").Do(context.TODO())
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if !exists {
- t.Fatalf("expected index template %q to exist; got: %v", "elastic-template", exists)
- }
-
- // Get template
- getres, err := client.IndexGetTemplate("elastic-template").Do(context.TODO())
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if getres == nil {
- t.Fatalf("expected to get index template %q; got: %v", "elastic-template", getres)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_exists_test.go b/vendor/github.com/olivere/elastic/indices_exists_test.go
deleted file mode 100644
index 07e3eb518..000000000
--- a/vendor/github.com/olivere/elastic/indices_exists_test.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndicesExistsWithoutIndex(t *testing.T) {
- client := setupTestClient(t)
-
- // No index name -> fail with error
- res, err := NewIndicesExistsService(client).Do(context.TODO())
- if err == nil {
- t.Fatalf("expected IndicesExists to fail without index name")
- }
- if res != false {
- t.Fatalf("expected result to be false; got: %v", res)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_exists_type.go b/vendor/github.com/olivere/elastic/indices_exists_type.go
deleted file mode 100644
index a4d1ff610..000000000
--- a/vendor/github.com/olivere/elastic/indices_exists_type.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesExistsTypeService checks if one or more types exist in one or more indices.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-types-exists.html
-// for details.
-type IndicesExistsTypeService struct {
- client *Client
- pretty bool
- typ []string
- index []string
- expandWildcards string
- local *bool
- ignoreUnavailable *bool
- allowNoIndices *bool
-}
-
-// NewIndicesExistsTypeService creates a new IndicesExistsTypeService.
-func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService {
- return &IndicesExistsTypeService{
- client: client,
- }
-}
-
-// Index is a list of index names; use `_all` to check the types across all indices.
-func (s *IndicesExistsTypeService) Index(indices ...string) *IndicesExistsTypeService {
- s.index = append(s.index, indices...)
- return s
-}
-
-// Type is a list of document types to check.
-func (s *IndicesExistsTypeService) Type(types ...string) *IndicesExistsTypeService {
- s.typ = append(s.typ, types...)
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// (This includes `_all` string or when no indices have been specified).
-func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// Local specifies whether to return local information, i.e. do not retrieve
-// the state from the master node (default: false).
-func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService {
- s.local = &local
- return s
-}
-
-// Pretty indicates that the JSON response should be indented and human readable.
-func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
- "index": strings.Join(s.index, ","),
- "type": strings.Join(s.typ, ","),
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.local != nil {
- params.Set("local", fmt.Sprintf("%v", *s.local))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesExistsTypeService) Validate() error {
- var invalid []string
- if len(s.index) == 0 {
- invalid = append(invalid, "Index")
- }
- if len(s.typ) == 0 {
- invalid = append(invalid, "Type")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesExistsTypeService) Do(ctx context.Context) (bool, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return false, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return false, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "HEAD",
- Path: path,
- Params: params,
- IgnoreErrors: []int{404},
- })
- if err != nil {
- return false, err
- }
-
- // Return operation response
- switch res.StatusCode {
- case http.StatusOK:
- return true, nil
- case http.StatusNotFound:
- return false, nil
- default:
- return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
- }
-}
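
As the deleted tests further down use it, the type-existence check is normally reached through the client's TypeExists helper rather than the constructor. A minimal sketch, assuming a configured *elastic.Client named client and placeholder index/type names:

    func typeExists(ctx context.Context, client *elastic.Client) (bool, error) {
        // HEAD /{index}/_mapping/{type}; 200 -> true, 404 -> false.
        return client.TypeExists().
            Index("twitter"). // placeholder index
            Type("doc").      // placeholder mapping type
            Do(ctx)
    }
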
diff --git a/vendor/github.com/olivere/elastic/indices_exists_type_test.go b/vendor/github.com/olivere/elastic/indices_exists_type_test.go
deleted file mode 100644
index 3795bd042..000000000
--- a/vendor/github.com/olivere/elastic/indices_exists_type_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndicesExistsTypeBuildURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Indices []string
- Types []string
- Expected string
- ExpectValidateFailure bool
- }{
- {
- []string{},
- []string{},
- "",
- true,
- },
- {
- []string{"index1"},
- []string{},
- "",
- true,
- },
- {
- []string{},
- []string{"type1"},
- "",
- true,
- },
- {
- []string{"index1"},
- []string{"type1"},
- "/index1/_mapping/type1",
- false,
- },
- {
- []string{"index1", "index2"},
- []string{"type1"},
- "/index1%2Cindex2/_mapping/type1",
- false,
- },
- {
- []string{"index1", "index2"},
- []string{"type1", "type2"},
- "/index1%2Cindex2/_mapping/type1%2Ctype2",
- false,
- },
- }
-
- for i, test := range tests {
- err := client.TypeExists().Index(test.Indices...).Type(test.Types...).Validate()
- if err == nil && test.ExpectValidateFailure {
- t.Errorf("#%d: expected validate to fail", i+1)
- continue
- }
- if err != nil && !test.ExpectValidateFailure {
- t.Errorf("#%d: expected validate to succeed", i+1)
- continue
- }
- if !test.ExpectValidateFailure {
- path, _, err := client.TypeExists().Index(test.Indices...).Type(test.Types...).buildURL()
- if err != nil {
- t.Fatalf("#%d: %v", i+1, err)
- }
- if path != test.Expected {
- t.Errorf("#%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
- }
-}
-
-func TestIndicesExistsType(t *testing.T) {
- client := setupTestClient(t)
-
- // Create index with tweet type
- createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if createIndex == nil {
- t.Errorf("expected result to be != nil; got: %v", createIndex)
- }
- if !createIndex.Acknowledged {
- t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
- }
-
- // Check if type exists
- exists, err := client.TypeExists().Index(testIndexName).Type("doc").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !exists {
- t.Fatalf("type %s should exist in index %s, but doesn't\n", "doc", testIndexName)
- }
-
- // Delete index
- deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !deleteIndex.Acknowledged {
- t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
- }
-
- // Check if type exists
- exists, err = client.TypeExists().Index(testIndexName).Type("doc").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if exists {
- t.Fatalf("type %s should not exist in index %s, but it does\n", "doc", testIndexName)
- }
-}
-
-func TestIndicesExistsTypeValidate(t *testing.T) {
- client := setupTestClient(t)
-
- // No index name -> fail with error
- res, err := NewIndicesExistsTypeService(client).Do(context.TODO())
- if err == nil {
- t.Fatalf("expected IndicesExistsType to fail without index name")
- }
- if res != false {
- t.Fatalf("expected result to be false; got: %v", res)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_flush.go b/vendor/github.com/olivere/elastic/indices_flush.go
deleted file mode 100644
index 113e53803..000000000
--- a/vendor/github.com/olivere/elastic/indices_flush.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesFlushService flushes one or more indices. The flush process of an index
-// frees memory from the index by flushing data to the index
-// storage and clearing the internal transaction log.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-flush.html
-// for details.
-type IndicesFlushService struct {
- client *Client
- pretty bool
- index []string
- force *bool
- waitIfOngoing *bool
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
-}
-
-// NewIndicesFlushService creates a new IndicesFlushService.
-func NewIndicesFlushService(client *Client) *IndicesFlushService {
- return &IndicesFlushService{
- client: client,
- index: make([]string, 0),
- }
-}
-
-// Index is a list of index names; use `_all` or empty string for all indices.
-func (s *IndicesFlushService) Index(indices ...string) *IndicesFlushService {
- s.index = append(s.index, indices...)
- return s
-}
-
-// Force indicates whether a flush should be forced even if it is not
-// necessarily needed, i.e. if no changes will be committed to the index.
-// This is useful if transaction log IDs should be incremented even if
-// no uncommitted changes are present. (This setting can be considered internal).
-func (s *IndicesFlushService) Force(force bool) *IndicesFlushService {
- s.force = &force
- return s
-}
-
-// WaitIfOngoing, if set to true, indicates that the flush operation will
-// block until the flush can be executed if another flush operation is
-// already executing. The default is false and will cause an exception
-// to be thrown on the shard level if another flush operation is already running.
-func (s *IndicesFlushService) WaitIfOngoing(waitIfOngoing bool) *IndicesFlushService {
- s.waitIfOngoing = &waitIfOngoing
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFlushService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices expression
-// resolves into no concrete indices. (This includes `_all` string or when
-// no indices have been specified).
-func (s *IndicesFlushService) AllowNoIndices(allowNoIndices bool) *IndicesFlushService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards specifies whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *IndicesFlushService) ExpandWildcards(expandWildcards string) *IndicesFlushService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// Pretty indicates that the JSON response should be indented and human readable.
-func (s *IndicesFlushService) Pretty(pretty bool) *IndicesFlushService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesFlushService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
-
- if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_flush", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- } else {
- path = "/_flush"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.force != nil {
- params.Set("force", fmt.Sprintf("%v", *s.force))
- }
- if s.waitIfOngoing != nil {
- params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing))
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesFlushService) Validate() error {
- return nil
-}
-
-// Do executes the service.
-func (s *IndicesFlushService) Do(ctx context.Context) (*IndicesFlushResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndicesFlushResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// -- Result of a flush request.
-
-type IndicesFlushResponse struct {
- Shards shardsInfo `json:"_shards"`
-}
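
A minimal sketch of how the flush service above is typically driven via the client's Flush helper (as the test file that follows does), assuming a configured *elastic.Client named client; the index name is a placeholder, and leaving Index unset flushes all indices via POST /_flush:

    func flushIndex(ctx context.Context, client *elastic.Client) error {
        res, err := client.Flush().
            Index("twitter").
            WaitIfOngoing(true). // block instead of erroring if another flush is running
            Do(ctx)
        if err != nil {
            return err
        }
        _ = res.Shards // IndicesFlushResponse only carries the _shards summary
        return nil
    }
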
diff --git a/vendor/github.com/olivere/elastic/indices_flush_test.go b/vendor/github.com/olivere/elastic/indices_flush_test.go
deleted file mode 100644
index afefd1251..000000000
--- a/vendor/github.com/olivere/elastic/indices_flush_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestFlush(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- // Flush all indices
- res, err := client.Flush().Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Errorf("expected res to be != nil; got: %v", res)
- }
-}
-
-func TestFlushBuildURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Indices []string
- Expected string
- ExpectValidateFailure bool
- }{
- {
- []string{},
- "/_flush",
- false,
- },
- {
- []string{"index1"},
- "/index1/_flush",
- false,
- },
- {
- []string{"index1", "index2"},
- "/index1%2Cindex2/_flush",
- false,
- },
- }
-
- for i, test := range tests {
- err := NewIndicesFlushService(client).Index(test.Indices...).Validate()
- if err == nil && test.ExpectValidateFailure {
- t.Errorf("case #%d: expected validate to fail", i+1)
- continue
- }
- if err != nil && !test.ExpectValidateFailure {
- t.Errorf("case #%d: expected validate to succeed", i+1)
- continue
- }
- if !test.ExpectValidateFailure {
- path, _, err := NewIndicesFlushService(client).Index(test.Indices...).buildURL()
- if err != nil {
- t.Fatalf("case #%d: %v", i+1, err)
- }
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_forcemerge.go b/vendor/github.com/olivere/elastic/indices_forcemerge.go
deleted file mode 100644
index 0e999cf19..000000000
--- a/vendor/github.com/olivere/elastic/indices_forcemerge.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesForcemergeService allows force merging of one or more indices.
-// The merge relates to the number of segments a Lucene index holds
-// within each shard. The force merge operation reduces the number
-// of segments by merging them.
-//
-// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-forcemerge.html
-// for more information.
-type IndicesForcemergeService struct {
- client *Client
- pretty bool
- index []string
- allowNoIndices *bool
- expandWildcards string
- flush *bool
- ignoreUnavailable *bool
- maxNumSegments interface{}
- onlyExpungeDeletes *bool
- operationThreading interface{}
-}
-
-// NewIndicesForcemergeService creates a new IndicesForcemergeService.
-func NewIndicesForcemergeService(client *Client) *IndicesForcemergeService {
- return &IndicesForcemergeService{
- client: client,
- index: make([]string, 0),
- }
-}
-
-// Index is a list of index names; use `_all` or empty string to perform
-// the operation on all indices.
-func (s *IndicesForcemergeService) Index(index ...string) *IndicesForcemergeService {
- if s.index == nil {
- s.index = make([]string, 0)
- }
- s.index = append(s.index, index...)
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// (This includes `_all` string or when no indices have been specified).
-func (s *IndicesForcemergeService) AllowNoIndices(allowNoIndices bool) *IndicesForcemergeService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *IndicesForcemergeService) ExpandWildcards(expandWildcards string) *IndicesForcemergeService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// Flush specifies whether the index should be flushed after performing
-// the operation (default: true).
-func (s *IndicesForcemergeService) Flush(flush bool) *IndicesForcemergeService {
- s.flush = &flush
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should
-// be ignored when unavailable (missing or closed).
-func (s *IndicesForcemergeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesForcemergeService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// MaxNumSegments specifies the number of segments the index should be
-// merged into (default: dynamic).
-func (s *IndicesForcemergeService) MaxNumSegments(maxNumSegments interface{}) *IndicesForcemergeService {
- s.maxNumSegments = maxNumSegments
- return s
-}
-
-// OnlyExpungeDeletes specifies whether the operation should only expunge
-// deleted documents.
-func (s *IndicesForcemergeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *IndicesForcemergeService {
- s.onlyExpungeDeletes = &onlyExpungeDeletes
- return s
-}
-
-func (s *IndicesForcemergeService) OperationThreading(operationThreading interface{}) *IndicesForcemergeService {
- s.operationThreading = operationThreading
- return s
-}
-
-// Pretty indicates that the JSON response should be indented and human readable.
-func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) {
- var err error
- var path string
-
- // Build URL
- if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_forcemerge", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- } else {
- path = "/_forcemerge"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.flush != nil {
- params.Set("flush", fmt.Sprintf("%v", *s.flush))
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.maxNumSegments != nil {
- params.Set("max_num_segments", fmt.Sprintf("%v", s.maxNumSegments))
- }
- if s.onlyExpungeDeletes != nil {
- params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes))
- }
- if s.operationThreading != nil {
- params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesForcemergeService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesForcemergeService) Do(ctx context.Context) (*IndicesForcemergeResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndicesForcemergeResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesForcemergeResponse is the response of IndicesForcemergeService.Do.
-type IndicesForcemergeResponse struct {
- Shards shardsInfo `json:"_shards"`
-}
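
A minimal sketch of the force-merge call as the deleted test below issues it, assuming a configured *elastic.Client named client and a placeholder index name:

    func forceMergeToOneSegment(ctx context.Context, client *elastic.Client) error {
        // POST /twitter/_forcemerge?max_num_segments=1
        _, err := client.Forcemerge("twitter").
            MaxNumSegments(1).
            Do(ctx)
        return err
    }
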
diff --git a/vendor/github.com/olivere/elastic/indices_forcemerge_test.go b/vendor/github.com/olivere/elastic/indices_forcemerge_test.go
deleted file mode 100644
index 6615d4dc6..000000000
--- a/vendor/github.com/olivere/elastic/indices_forcemerge_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndicesForcemergeBuildURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Indices []string
- Expected string
- }{
- {
- []string{},
- "/_forcemerge",
- },
- {
- []string{"index1"},
- "/index1/_forcemerge",
- },
- {
- []string{"index1", "index2"},
- "/index1%2Cindex2/_forcemerge",
- },
- }
-
- for i, test := range tests {
- path, _, err := client.Forcemerge().Index(test.Indices...).buildURL()
- if err != nil {
- t.Errorf("case #%d: %v", i+1, err)
- continue
- }
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
-}
-
-func TestIndicesForcemerge(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- _, err := client.Forcemerge(testIndexName).MaxNumSegments(1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- /*
- if !ok {
- t.Fatalf("expected forcemerge to succeed; got: %v", ok)
- }
- */
-}
diff --git a/vendor/github.com/olivere/elastic/indices_get.go b/vendor/github.com/olivere/elastic/indices_get.go
deleted file mode 100644
index cb4e449d5..000000000
--- a/vendor/github.com/olivere/elastic/indices_get.go
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesGetService retrieves information about one or more indices.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-get-index.html
-// for more details.
-type IndicesGetService struct {
- client *Client
- pretty bool
- index []string
- feature []string
- local *bool
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
- flatSettings *bool
- human *bool
-}
-
-// NewIndicesGetService creates a new IndicesGetService.
-func NewIndicesGetService(client *Client) *IndicesGetService {
- return &IndicesGetService{
- client: client,
- index: make([]string, 0),
- feature: make([]string, 0),
- }
-}
-
-// Index is a list of index names.
-func (s *IndicesGetService) Index(indices ...string) *IndicesGetService {
- s.index = append(s.index, indices...)
- return s
-}
-
-// Feature is a list of features.
-func (s *IndicesGetService) Feature(features ...string) *IndicesGetService {
- s.feature = append(s.feature, features...)
- return s
-}
-
-// Local indicates whether to return local information, i.e. do not retrieve
-// the state from the master node (default: false).
-func (s *IndicesGetService) Local(local bool) *IndicesGetService {
- s.local = &local
- return s
-}
-
-// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false).
-func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard expression
-// resolves to no concrete indices (default: false).
-func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether wildcard expressions should get
-// expanded to open or closed indices (default: open).
-func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-/* Disabled because serialization would fail in that case. */
-/*
-// FlatSettings make the service return settings in flat format (default: false).
-func (s *IndicesGetService) FlatSettings(flatSettings bool) *IndicesGetService {
- s.flatSettings = &flatSettings
- return s
-}
-*/
-
-// Human indicates whether to return version and creation date values
-// in human-readable format (default: false).
-func (s *IndicesGetService) Human(human bool) *IndicesGetService {
- s.human = &human
- return s
-}
-
-// Pretty indicates that the JSON response should be indented and human readable.
-func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesGetService) buildURL() (string, url.Values, error) {
- var err error
- var path string
- var index []string
-
- if len(s.index) > 0 {
- index = s.index
- } else {
- index = []string{"_all"}
- }
-
- if len(s.feature) > 0 {
- // Build URL
- path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{
- "index": strings.Join(index, ","),
- "feature": strings.Join(s.feature, ","),
- })
- } else {
- // Build URL
- path, err = uritemplates.Expand("/{index}", map[string]string{
- "index": strings.Join(index, ","),
- })
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.flatSettings != nil {
- params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
- }
- if s.human != nil {
- params.Set("human", fmt.Sprintf("%v", *s.human))
- }
- if s.local != nil {
- params.Set("local", fmt.Sprintf("%v", *s.local))
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesGetService) Validate() error {
- var invalid []string
- if len(s.index) == 0 {
- invalid = append(invalid, "Index")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesGetService) Do(ctx context.Context) (map[string]*IndicesGetResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- var ret map[string]*IndicesGetResponse
- if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesGetResponse is part of the response of IndicesGetService.Do.
-type IndicesGetResponse struct {
- Aliases map[string]interface{} `json:"aliases"`
- Mappings map[string]interface{} `json:"mappings"`
- Settings map[string]interface{} `json:"settings"`
- Warmers map[string]interface{} `json:"warmers"`
-}
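
The metadata lookup above returns one IndicesGetResponse per index, keyed by index name. A minimal sketch built directly on the constructor, assuming a configured *elastic.Client named client, the fmt import, and a placeholder index pattern (Validate requires at least one index to be set):

    func dumpIndexSettings(ctx context.Context, client *elastic.Client) error {
        infos, err := elastic.NewIndicesGetService(client).
            Index("twitter-*"). // placeholder index name or pattern
            Do(ctx)
        if err != nil {
            return err
        }
        for name, info := range infos {
            fmt.Println(name, info.Settings)
        }
        return nil
    }
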
diff --git a/vendor/github.com/olivere/elastic/indices_get_aliases.go b/vendor/github.com/olivere/elastic/indices_get_aliases.go
deleted file mode 100644
index 68b186358..000000000
--- a/vendor/github.com/olivere/elastic/indices_get_aliases.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// AliasesService returns the aliases associated with one or more indices.
-// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-aliases.html.
-type AliasesService struct {
- client *Client
- index []string
- pretty bool
-}
-
-// NewAliasesService instantiates a new AliasesService.
-func NewAliasesService(client *Client) *AliasesService {
- builder := &AliasesService{
- client: client,
- }
- return builder
-}
-
-// Pretty asks Elasticsearch to indent the returned JSON.
-func (s *AliasesService) Pretty(pretty bool) *AliasesService {
- s.pretty = pretty
- return s
-}
-
-// Index adds one or more indices.
-func (s *AliasesService) Index(index ...string) *AliasesService {
- s.index = append(s.index, index...)
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *AliasesService) buildURL() (string, url.Values, error) {
- var err error
- var path string
-
- if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_alias", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- } else {
- path = "/_alias"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", fmt.Sprintf("%v", s.pretty))
- }
- return path, params, nil
-}
-
-func (s *AliasesService) Do(ctx context.Context) (*AliasesResult, error) {
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // {
- // "indexName" : {
- // "aliases" : {
- // "alias1" : { },
- // "alias2" : { }
- // }
- // },
- // "indexName2" : {
- // ...
- // },
- // }
- indexMap := make(map[string]interface{})
- if err := s.client.decoder.Decode(res.Body, &indexMap); err != nil {
- return nil, err
- }
-
- // Each (indexName, _)
- ret := &AliasesResult{
- Indices: make(map[string]indexResult),
- }
- for indexName, indexData := range indexMap {
- indexOut, found := ret.Indices[indexName]
- if !found {
- indexOut = indexResult{Aliases: make([]aliasResult, 0)}
- }
-
- // { "aliases" : { ... } }
- indexDataMap, ok := indexData.(map[string]interface{})
- if ok {
- aliasesData, ok := indexDataMap["aliases"].(map[string]interface{})
- if ok {
- for aliasName := range aliasesData {
- aliasRes := aliasResult{AliasName: aliasName}
- indexOut.Aliases = append(indexOut.Aliases, aliasRes)
- }
- }
- }
-
- ret.Indices[indexName] = indexOut
- }
-
- return ret, nil
-}
-
-// -- Result of an alias request.
-
-type AliasesResult struct {
- Indices map[string]indexResult
-}
-
-type indexResult struct {
- Aliases []aliasResult
-}
-
-type aliasResult struct {
- AliasName string
-}
-
-func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
- var indices []string
- for indexName, indexInfo := range ar.Indices {
- for _, aliasInfo := range indexInfo.Aliases {
- if aliasInfo.AliasName == aliasName {
- indices = append(indices, indexName)
- }
- }
- }
- return indices
-}
-
-func (ir indexResult) HasAlias(aliasName string) bool {
- for _, alias := range ir.Aliases {
- if alias.AliasName == aliasName {
- return true
- }
- }
- return false
-}
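
A minimal sketch of the alias lookup above, assuming a configured *elastic.Client named client; index and alias names are placeholders. The AliasesResult helpers defined above (IndicesByAlias, HasAlias) do the post-processing:

    func indicesBehindAlias(ctx context.Context, client *elastic.Client) ([]string, error) {
        res, err := client.Aliases().
            Index("twitter", "twitter-archive").
            Do(ctx)
        if err != nil {
            return nil, err
        }
        // Which of the queried indices carry the "tweets" alias?
        return res.IndicesByAlias("tweets"), nil
    }
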
diff --git a/vendor/github.com/olivere/elastic/indices_get_aliases_test.go b/vendor/github.com/olivere/elastic/indices_get_aliases_test.go
deleted file mode 100644
index 2c8da9b7f..000000000
--- a/vendor/github.com/olivere/elastic/indices_get_aliases_test.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestAliasesBuildURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Indices []string
- Expected string
- }{
- {
- []string{},
- "/_alias",
- },
- {
- []string{"index1"},
- "/index1/_alias",
- },
- {
- []string{"index1", "index2"},
- "/index1%2Cindex2/_alias",
- },
- }
-
- for i, test := range tests {
- path, _, err := client.Aliases().Index(test.Indices...).buildURL()
- if err != nil {
- t.Errorf("case #%d: %v", i+1, err)
- continue
- }
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
-}
-
-func TestAliases(t *testing.T) {
- var err error
-
- //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- // Some tweets
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
- tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
-
- // Add tweets to first index
- _, err = client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- // Add tweets to second index
- _, err = client.Index().Index(testIndexName2).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Flush
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Flush().Index(testIndexName2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Alias should not yet exist
- aliasesResult1, err := client.Aliases().
- Index(testIndexName, testIndexName2).
- Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if len(aliasesResult1.Indices) != 2 {
- t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult1.Indices))
- }
- for indexName, indexDetails := range aliasesResult1.Indices {
- if len(indexDetails.Aliases) != 0 {
- t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
- }
- }
-
- // Add both indices to a new alias
- aliasCreate, err := client.Alias().
- Add(testIndexName, testAliasName).
- Add(testIndexName2, testAliasName).
- //Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !aliasCreate.Acknowledged {
- t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
- }
-
- // Alias should now exist
- aliasesResult2, err := client.Aliases().
- Index(testIndexName, testIndexName2).
- //Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if len(aliasesResult2.Indices) != 2 {
- t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices))
- }
- for indexName, indexDetails := range aliasesResult2.Indices {
- if len(indexDetails.Aliases) != 1 {
- t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
- }
- }
-
- // Check the reverse function:
- indexInfo1, found := aliasesResult2.Indices[testIndexName]
- if !found {
- t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
- }
- aliasFound := indexInfo1.HasAlias(testAliasName)
- if !aliasFound {
- t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName, aliasFound)
- }
-
- // Check the reverse function:
- indexInfo2, found := aliasesResult2.Indices[testIndexName2]
- if !found {
- t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
- }
- aliasFound = indexInfo2.HasAlias(testAliasName)
- if !aliasFound {
- t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName2, aliasFound)
- }
-
- // Removing the alias from the first index should leave the alias only on the second index
- aliasRemove1, err := client.Alias().
- Remove(testIndexName, testAliasName).
- //Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !aliasRemove1.Acknowledged {
- t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
- }
-
- // Alias should now exist only for index 2
- aliasesResult3, err := client.Aliases().Index(testIndexName, testIndexName2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if len(aliasesResult3.Indices) != 2 {
- t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult3.Indices))
- }
- for indexName, indexDetails := range aliasesResult3.Indices {
- if indexName == testIndexName {
- if len(indexDetails.Aliases) != 0 {
- t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
- }
- } else if indexName == testIndexName2 {
- if len(indexDetails.Aliases) != 1 {
- t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
- }
- } else {
- t.Errorf("got index %s", indexName)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_get_field_mapping.go b/vendor/github.com/olivere/elastic/indices_get_field_mapping.go
deleted file mode 100644
index e3b7eac07..000000000
--- a/vendor/github.com/olivere/elastic/indices_get_field_mapping.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesGetFieldMappingService retrieves the mapping definitions for the fields in an index
-// or index/type.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-get-field-mapping.html
-// for details.
-type IndicesGetFieldMappingService struct {
- client *Client
- pretty bool
- index []string
- typ []string
- field []string
- local *bool
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
-}
-
-// NewGetFieldMappingService is an alias for NewIndicesGetFieldMappingService.
-// Use NewIndicesGetFieldMappingService.
-func NewGetFieldMappingService(client *Client) *IndicesGetFieldMappingService {
- return NewIndicesGetFieldMappingService(client)
-}
-
-// NewIndicesGetFieldMappingService creates a new IndicesGetFieldMappingService.
-func NewIndicesGetFieldMappingService(client *Client) *IndicesGetFieldMappingService {
- return &IndicesGetFieldMappingService{
- client: client,
- }
-}
-
-// Index is a list of index names.
-func (s *IndicesGetFieldMappingService) Index(indices ...string) *IndicesGetFieldMappingService {
- s.index = append(s.index, indices...)
- return s
-}
-
-// Type is a list of document types.
-func (s *IndicesGetFieldMappingService) Type(types ...string) *IndicesGetFieldMappingService {
- s.typ = append(s.typ, types...)
- return s
-}
-
-// Field is a list of fields.
-func (s *IndicesGetFieldMappingService) Field(fields ...string) *IndicesGetFieldMappingService {
- s.field = append(s.field, fields...)
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// This includes `_all` string or when no indices have been specified.
-func (s *IndicesGetFieldMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetFieldMappingService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *IndicesGetFieldMappingService) ExpandWildcards(expandWildcards string) *IndicesGetFieldMappingService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// Local indicates whether to return local information, i.e. do not retrieve
-// the state from the master node (default: false).
-func (s *IndicesGetFieldMappingService) Local(local bool) *IndicesGetFieldMappingService {
- s.local = &local
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesGetFieldMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetFieldMappingService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// Pretty indicates that the JSON response should be indented and human readable.
-func (s *IndicesGetFieldMappingService) Pretty(pretty bool) *IndicesGetFieldMappingService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesGetFieldMappingService) buildURL() (string, url.Values, error) {
- var index, typ, field []string
-
- if len(s.index) > 0 {
- index = s.index
- } else {
- index = []string{"_all"}
- }
-
- if len(s.typ) > 0 {
- typ = s.typ
- } else {
- typ = []string{"_all"}
- }
-
- if len(s.field) > 0 {
- field = s.field
- } else {
- field = []string{"*"}
- }
-
- // Build URL
- path, err := uritemplates.Expand("/{index}/_mapping/{type}/field/{field}", map[string]string{
- "index": strings.Join(index, ","),
- "type": strings.Join(typ, ","),
- "field": strings.Join(field, ","),
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.local != nil {
- params.Set("local", fmt.Sprintf("%v", *s.local))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesGetFieldMappingService) Validate() error {
- return nil
-}
-
-// Do executes the operation. It returns mapping definitions for an index
-// or index/type.
-func (s *IndicesGetFieldMappingService) Do(ctx context.Context) (map[string]interface{}, error) {
- var ret map[string]interface{}
-
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
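
Field-mapping lookups are typically issued through the client's GetFieldMapping helper, as the deleted URL test below does; the result is returned as a raw map keyed by index name. A minimal sketch, assuming a configured *elastic.Client named client and placeholder index/type/field names:

    func messageFieldMapping(ctx context.Context, client *elastic.Client) (map[string]interface{}, error) {
        // GET /twitter/_mapping/tweet/field/message
        return client.GetFieldMapping().
            Index("twitter").
            Type("tweet").
            Field("message").
            Do(ctx)
    }
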
diff --git a/vendor/github.com/olivere/elastic/indices_get_field_mapping_test.go b/vendor/github.com/olivere/elastic/indices_get_field_mapping_test.go
deleted file mode 100644
index 62770e030..000000000
--- a/vendor/github.com/olivere/elastic/indices_get_field_mapping_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "testing"
-)
-
-func TestIndicesGetFieldMappingURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Indices []string
- Types []string
- Fields []string
- Expected string
- }{
- {
- []string{},
- []string{},
- []string{},
- "/_all/_mapping/_all/field/%2A",
- },
- {
- []string{},
- []string{"tweet"},
- []string{"message"},
- "/_all/_mapping/tweet/field/message",
- },
- {
- []string{"twitter"},
- []string{"tweet"},
- []string{"*.id"},
- "/twitter/_mapping/tweet/field/%2A.id",
- },
- {
- []string{"store-1", "store-2"},
- []string{"tweet", "user"},
- []string{"message", "*.id"},
- "/store-1%2Cstore-2/_mapping/tweet%2Cuser/field/message%2C%2A.id",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.GetFieldMapping().Index(test.Indices...).Type(test.Types...).Field(test.Fields...).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_get_mapping.go b/vendor/github.com/olivere/elastic/indices_get_mapping.go
deleted file mode 100644
index 7f9c9cb22..000000000
--- a/vendor/github.com/olivere/elastic/indices_get_mapping.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesGetMappingService retrieves the mapping definitions for an index or
-// index/type.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-get-mapping.html
-// for details.
-type IndicesGetMappingService struct {
- client *Client
- pretty bool
- index []string
- typ []string
- local *bool
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
-}
-
-// NewGetMappingService is an alias for NewIndicesGetMappingService.
-// Use NewIndicesGetMappingService.
-func NewGetMappingService(client *Client) *IndicesGetMappingService {
- return NewIndicesGetMappingService(client)
-}
-
-// NewIndicesGetMappingService creates a new IndicesGetMappingService.
-func NewIndicesGetMappingService(client *Client) *IndicesGetMappingService {
- return &IndicesGetMappingService{
- client: client,
- index: make([]string, 0),
- typ: make([]string, 0),
- }
-}
-
-// Index is a list of index names.
-func (s *IndicesGetMappingService) Index(indices ...string) *IndicesGetMappingService {
- s.index = append(s.index, indices...)
- return s
-}
-
-// Type is a list of document types.
-func (s *IndicesGetMappingService) Type(types ...string) *IndicesGetMappingService {
- s.typ = append(s.typ, types...)
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// This includes `_all` string or when no indices have been specified.
-func (s *IndicesGetMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetMappingService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *IndicesGetMappingService) ExpandWildcards(expandWildcards string) *IndicesGetMappingService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// Local indicates whether to return local information, i.e. do not
-// retrieve the state from the master node (default: false).
-func (s *IndicesGetMappingService) Local(local bool) *IndicesGetMappingService {
- s.local = &local
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesGetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetMappingService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesGetMappingService) Pretty(pretty bool) *IndicesGetMappingService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesGetMappingService) buildURL() (string, url.Values, error) {
- var index, typ []string
-
- if len(s.index) > 0 {
- index = s.index
- } else {
- index = []string{"_all"}
- }
-
- if len(s.typ) > 0 {
- typ = s.typ
- } else {
- typ = []string{"_all"}
- }
-
- // Build URL
- path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
- "index": strings.Join(index, ","),
- "type": strings.Join(typ, ","),
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.local != nil {
- params.Set("local", fmt.Sprintf("%v", *s.local))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesGetMappingService) Validate() error {
- return nil
-}
-
-// Do executes the operation. It returns mapping definitions for an index
-// or index/type.
-func (s *IndicesGetMappingService) Do(ctx context.Context) (map[string]interface{}, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- var ret map[string]interface{}
- if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
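A short usage sketch for the mapping-retrieval service deleted above; it assumes an already configured *elastic.Client and mirrors the fluent calls used in the test file that follows. Index and type names are placeholders.

package example

import (
	"context"

	"github.com/olivere/elastic"
)

// tweetMapping fetches the mapping of type "tweet" in index "twitter".
// The result is the raw JSON response decoded into a generic map,
// keyed by index name.
func tweetMapping(ctx context.Context, client *elastic.Client) (map[string]interface{}, error) {
	return client.GetMapping().
		Index("twitter").
		Type("tweet").
		Do(ctx)
}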
diff --git a/vendor/github.com/olivere/elastic/indices_get_mapping_test.go b/vendor/github.com/olivere/elastic/indices_get_mapping_test.go
deleted file mode 100644
index 5ec54e7fb..000000000
--- a/vendor/github.com/olivere/elastic/indices_get_mapping_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "testing"
-)
-
-func TestIndicesGetMappingURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Indices []string
- Types []string
- Expected string
- }{
- {
- []string{},
- []string{},
- "/_all/_mapping/_all",
- },
- {
- []string{},
- []string{"tweet"},
- "/_all/_mapping/tweet",
- },
- {
- []string{"twitter"},
- []string{"tweet"},
- "/twitter/_mapping/tweet",
- },
- {
- []string{"store-1", "store-2"},
- []string{"tweet", "user"},
- "/store-1%2Cstore-2/_mapping/tweet%2Cuser",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.GetMapping().Index(test.Indices...).Type(test.Types...).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_get_settings.go b/vendor/github.com/olivere/elastic/indices_get_settings.go
deleted file mode 100644
index 06fce0dfa..000000000
--- a/vendor/github.com/olivere/elastic/indices_get_settings.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesGetSettingsService retrieves the settings of one
-// or more indices.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-get-settings.html
-// for more details.
-type IndicesGetSettingsService struct {
- client *Client
- pretty bool
- index []string
- name []string
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
- flatSettings *bool
- local *bool
-}
-
-// NewIndicesGetSettingsService creates a new IndicesGetSettingsService.
-func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService {
- return &IndicesGetSettingsService{
- client: client,
- index: make([]string, 0),
- name: make([]string, 0),
- }
-}
-
-// Index is a list of index names; use `_all` or empty string to perform
-// the operation on all indices.
-func (s *IndicesGetSettingsService) Index(indices ...string) *IndicesGetSettingsService {
- s.index = append(s.index, indices...)
- return s
-}
-
-// Name are the names of the settings that should be included.
-func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService {
- s.name = append(s.name, name...)
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should
-// be ignored when unavailable (missing or closed).
-func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// (This includes `_all` string or when no indices have been specified).
-func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression
-// to concrete indices that are open, closed or both.
-// Options: open, closed, none, all. Default: open,closed.
-func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// FlatSettings indicates whether to return settings in flat format (default: false).
-func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService {
- s.flatSettings = &flatSettings
- return s
-}
-
-// Local indicates whether to return local information, i.e. do not
-// retrieve the state from the master node (default: false).
-func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService {
- s.local = &local
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
- var err error
- var path string
- var index []string
-
- if len(s.index) > 0 {
- index = s.index
- } else {
- index = []string{"_all"}
- }
-
- if len(s.name) > 0 {
- // Build URL
- path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{
- "index": strings.Join(index, ","),
- "name": strings.Join(s.name, ","),
- })
- } else {
- // Build URL
- path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
- "index": strings.Join(index, ","),
- })
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.flatSettings != nil {
- params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
- }
- if s.local != nil {
- params.Set("local", fmt.Sprintf("%v", *s.local))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesGetSettingsService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesGetSettingsService) Do(ctx context.Context) (map[string]*IndicesGetSettingsResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- var ret map[string]*IndicesGetSettingsResponse
- if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do.
-type IndicesGetSettingsResponse struct {
- Settings map[string]interface{} `json:"settings"`
-}
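A hedged sketch of reading index settings with the service deleted above, assuming a configured client; the index name, the FlatSettings choice and the helper name are illustrative only.

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// refreshInterval reads the refresh_interval setting of the given index.
// With FlatSettings(true) the settings map uses dotted keys such as
// "index.refresh_interval".
func refreshInterval(ctx context.Context, client *elastic.Client, index string) (interface{}, error) {
	res, err := client.IndexGetSettings().
		Index(index).
		FlatSettings(true).
		Do(ctx)
	if err != nil {
		return nil, err
	}
	info, ok := res[index]
	if !ok {
		return nil, fmt.Errorf("no settings returned for index %q", index)
	}
	return info.Settings["index.refresh_interval"], nil
}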
diff --git a/vendor/github.com/olivere/elastic/indices_get_settings_test.go b/vendor/github.com/olivere/elastic/indices_get_settings_test.go
deleted file mode 100644
index 7c6995a28..000000000
--- a/vendor/github.com/olivere/elastic/indices_get_settings_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndexGetSettingsURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Indices []string
- Names []string
- Expected string
- }{
- {
- []string{},
- []string{},
- "/_all/_settings",
- },
- {
- []string{},
- []string{"index.merge.*"},
- "/_all/_settings/index.merge.%2A",
- },
- {
- []string{"twitter-*"},
- []string{"index.merge.*", "_settings"},
- "/twitter-%2A/_settings/index.merge.%2A%2C_settings",
- },
- {
- []string{"store-1", "store-2"},
- []string{"index.merge.*", "_settings"},
- "/store-1%2Cstore-2/_settings/index.merge.%2A%2C_settings",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.IndexGetSettings().Index(test.Indices...).Name(test.Names...).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
-
-func TestIndexGetSettingsService(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
- if esversion < "1.4.0" {
- t.Skip("Index Get API is available since 1.4")
- return
- }
-
- res, err := client.IndexGetSettings().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatalf("expected result; got: %v", res)
- }
- info, found := res[testIndexName]
- if !found {
- t.Fatalf("expected index %q to be found; got: %v", testIndexName, found)
- }
- if info == nil {
- t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info)
- }
- if info.Settings == nil {
- t.Fatalf("expected index settings of %q to be != nil; got: %v", testIndexName, info.Settings)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_get_template.go b/vendor/github.com/olivere/elastic/indices_get_template.go
deleted file mode 100644
index ad3a091a0..000000000
--- a/vendor/github.com/olivere/elastic/indices_get_template.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesGetTemplateService returns an index template.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-templates.html.
-type IndicesGetTemplateService struct {
- client *Client
- pretty bool
- name []string
- flatSettings *bool
- local *bool
-}
-
-// NewIndicesGetTemplateService creates a new IndicesGetTemplateService.
-func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService {
- return &IndicesGetTemplateService{
- client: client,
- name: make([]string, 0),
- }
-}
-
-// Name is the name of the index template.
-func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService {
- s.name = append(s.name, name...)
- return s
-}
-
-// FlatSettings indicates whether to return settings in flat format (default: false).
-func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService {
- s.flatSettings = &flatSettings
- return s
-}
-
-// Local indicates whether to return local information, i.e. do not retrieve
-// the state from master node (default: false).
-func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService {
- s.local = &local
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
- if len(s.name) > 0 {
- path, err = uritemplates.Expand("/_template/{name}", map[string]string{
- "name": strings.Join(s.name, ","),
- })
- } else {
- path = "/_template"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.flatSettings != nil {
- params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
- }
- if s.local != nil {
- params.Set("local", fmt.Sprintf("%v", *s.local))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesGetTemplateService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesGetTemplateService) Do(ctx context.Context) (map[string]*IndicesGetTemplateResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- var ret map[string]*IndicesGetTemplateResponse
- if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do.
-type IndicesGetTemplateResponse struct {
- Order int `json:"order,omitempty"`
- Version int `json:"version,omitempty"`
- Template string `json:"template,omitempty"`
- Settings map[string]interface{} `json:"settings,omitempty"`
- Mappings map[string]interface{} `json:"mappings,omitempty"`
- Aliases map[string]interface{} `json:"aliases,omitempty"`
-}
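For completeness, a minimal sketch of reading an index template via the service deleted above; the template name is a placeholder, the client is assumed to be set up elsewhere, and the -1 sentinel is just an illustrative convention.

package example

import (
	"context"

	"github.com/olivere/elastic"
)

// templateOrder returns the order value of the named index template,
// or -1 if the template is not present in the response.
func templateOrder(ctx context.Context, client *elastic.Client, name string) (int, error) {
	res, err := client.IndexGetTemplate().Name(name).Do(ctx)
	if err != nil {
		return -1, err
	}
	tmpl, ok := res[name]
	if !ok {
		return -1, nil
	}
	return tmpl.Order, nil
}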
diff --git a/vendor/github.com/olivere/elastic/indices_get_template_test.go b/vendor/github.com/olivere/elastic/indices_get_template_test.go
deleted file mode 100644
index c884ec1cb..000000000
--- a/vendor/github.com/olivere/elastic/indices_get_template_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "testing"
-)
-
-func TestIndexGetTemplateURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Names []string
- Expected string
- }{
- {
- []string{},
- "/_template",
- },
- {
- []string{"index1"},
- "/_template/index1",
- },
- {
- []string{"index1", "index2"},
- "/_template/index1%2Cindex2",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.IndexGetTemplate().Name(test.Names...).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_get_test.go b/vendor/github.com/olivere/elastic/indices_get_test.go
deleted file mode 100644
index 6d37fca6e..000000000
--- a/vendor/github.com/olivere/elastic/indices_get_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndicesGetValidate(t *testing.T) {
- client := setupTestClient(t)
-
- // No index name -> fail with error
- res, err := NewIndicesGetService(client).Index("").Do(context.TODO())
- if err == nil {
- t.Fatalf("expected IndicesGet to fail without index name")
- }
- if res != nil {
- t.Fatalf("expected result to be == nil; got: %v", res)
- }
-}
-
-func TestIndicesGetURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Indices []string
- Features []string
- Expected string
- }{
- {
- []string{},
- []string{},
- "/_all",
- },
- {
- []string{},
- []string{"_mappings"},
- "/_all/_mappings",
- },
- {
- []string{"twitter"},
- []string{"_mappings", "_settings"},
- "/twitter/_mappings%2C_settings",
- },
- {
- []string{"store-1", "store-2"},
- []string{"_mappings", "_settings"},
- "/store-1%2Cstore-2/_mappings%2C_settings",
- },
- }
-
- for _, test := range tests {
- path, _, err := NewIndicesGetService(client).Index(test.Indices...).Feature(test.Features...).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
-
-func TestIndicesGetService(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
- if esversion < "1.4.0" {
- t.Skip("Index Get API is available since 1.4")
- return
- }
-
- res, err := client.IndexGet().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatalf("expected result; got: %v", res)
- }
- info, found := res[testIndexName]
- if !found {
- t.Fatalf("expected index %q to be found; got: %v", testIndexName, found)
- }
- if info == nil {
- t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info)
- }
- if info.Mappings == nil {
- t.Errorf("expected mappings to be != nil; got: %v", info.Mappings)
- }
- if info.Settings == nil {
- t.Errorf("expected settings to be != nil; got: %v", info.Settings)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_open.go b/vendor/github.com/olivere/elastic/indices_open.go
deleted file mode 100644
index 1b58c5721..000000000
--- a/vendor/github.com/olivere/elastic/indices_open.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesOpenService opens an index.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-open-close.html
-// for details.
-type IndicesOpenService struct {
- client *Client
- pretty bool
- index string
- timeout string
- masterTimeout string
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
-}
-
-// NewIndicesOpenService creates and initializes a new IndicesOpenService.
-func NewIndicesOpenService(client *Client) *IndicesOpenService {
- return &IndicesOpenService{client: client}
-}
-
-// Index is the name of the index to open.
-func (s *IndicesOpenService) Index(index string) *IndicesOpenService {
- s.index = index
- return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *IndicesOpenService) Timeout(timeout string) *IndicesOpenService {
- s.timeout = timeout
- return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesOpenService) MasterTimeout(masterTimeout string) *IndicesOpenService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should
-// be ignored when unavailable (missing or closed).
-func (s *IndicesOpenService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesOpenService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// (This includes `_all` string or when no indices have been specified).
-func (s *IndicesOpenService) AllowNoIndices(allowNoIndices bool) *IndicesOpenService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *IndicesOpenService) ExpandWildcards(expandWildcards string) *IndicesOpenService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesOpenService) Pretty(pretty bool) *IndicesOpenService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesOpenService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/{index}/_open", map[string]string{
- "index": s.index,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
-
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesOpenService) Validate() error {
- var invalid []string
- if s.index == "" {
- invalid = append(invalid, "Index")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesOpenService) Do(ctx context.Context) (*IndicesOpenResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndicesOpenResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesOpenResponse is the response of IndicesOpenService.Do.
-type IndicesOpenResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
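A brief sketch of opening a previously closed index with the service deleted above, using the exported constructor shown in its test; the index name and the timeout value are illustrative assumptions.

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// openIndex opens the named index and reports whether the cluster
// acknowledged the request.
func openIndex(ctx context.Context, client *elastic.Client, index string) error {
	res, err := elastic.NewIndicesOpenService(client).
		Index(index).
		Timeout("30s").
		Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("open of index %q was not acknowledged", index)
	}
	return nil
}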
diff --git a/vendor/github.com/olivere/elastic/indices_open_test.go b/vendor/github.com/olivere/elastic/indices_open_test.go
deleted file mode 100644
index aab6c5c19..000000000
--- a/vendor/github.com/olivere/elastic/indices_open_test.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndicesOpenValidate(t *testing.T) {
- client := setupTestClient(t)
-
- // No index name -> fail with error
- res, err := NewIndicesOpenService(client).Do(context.TODO())
- if err == nil {
- t.Fatalf("expected IndicesOpen to fail without index name")
- }
- if res != nil {
- t.Fatalf("expected result to be == nil; got: %v", res)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_put_alias.go b/vendor/github.com/olivere/elastic/indices_put_alias.go
deleted file mode 100644
index 12f8e1bd5..000000000
--- a/vendor/github.com/olivere/elastic/indices_put_alias.go
+++ /dev/null
@@ -1,302 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-)
-
-// -- Actions --
-
-// AliasAction is an action to apply to an alias, e.g. "add" or "remove".
-type AliasAction interface {
- Source() (interface{}, error)
-}
-
-// AliasAddAction is an action to add an alias to one or more indices.
-type AliasAddAction struct {
- index []string // index name(s)
- alias string // alias name
- filter Query
- routing string
- searchRouting string
- indexRouting string
-}
-
-// NewAliasAddAction returns an action to add an alias.
-func NewAliasAddAction(alias string) *AliasAddAction {
- return &AliasAddAction{
- alias: alias,
- }
-}
-
-// Index associates one or more indices to the alias.
-func (a *AliasAddAction) Index(index ...string) *AliasAddAction {
- a.index = append(a.index, index...)
- return a
-}
-
-func (a *AliasAddAction) removeBlankIndexNames() {
- var indices []string
- for _, index := range a.index {
- if len(index) > 0 {
- indices = append(indices, index)
- }
- }
- a.index = indices
-}
-
-// Filter associates a filter to the alias.
-func (a *AliasAddAction) Filter(filter Query) *AliasAddAction {
- a.filter = filter
- return a
-}
-
-// Routing associates a routing value to the alias.
-// This basically sets index and search routing to the same value.
-func (a *AliasAddAction) Routing(routing string) *AliasAddAction {
- a.routing = routing
- return a
-}
-
-// IndexRouting associates an index routing value to the alias.
-func (a *AliasAddAction) IndexRouting(routing string) *AliasAddAction {
- a.indexRouting = routing
- return a
-}
-
-// SearchRouting associates a search routing value to the alias.
-func (a *AliasAddAction) SearchRouting(routing ...string) *AliasAddAction {
- a.searchRouting = strings.Join(routing, ",")
- return a
-}
-
-// Validate checks if the operation is valid.
-func (a *AliasAddAction) Validate() error {
- var invalid []string
- if len(a.alias) == 0 {
- invalid = append(invalid, "Alias")
- }
- if len(a.index) == 0 {
- invalid = append(invalid, "Index")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Source returns the JSON-serializable data.
-func (a *AliasAddAction) Source() (interface{}, error) {
- a.removeBlankIndexNames()
- if err := a.Validate(); err != nil {
- return nil, err
- }
- src := make(map[string]interface{})
- act := make(map[string]interface{})
- src["add"] = act
- act["alias"] = a.alias
- switch len(a.index) {
- case 1:
- act["index"] = a.index[0]
- default:
- act["indices"] = a.index
- }
- if a.filter != nil {
- f, err := a.filter.Source()
- if err != nil {
- return nil, err
- }
- act["filter"] = f
- }
- if len(a.routing) > 0 {
- act["routing"] = a.routing
- }
- if len(a.indexRouting) > 0 {
- act["index_routing"] = a.indexRouting
- }
- if len(a.searchRouting) > 0 {
- act["search_routing"] = a.searchRouting
- }
- return src, nil
-}
-
-// AliasRemoveAction is an action to remove an alias.
-type AliasRemoveAction struct {
- index []string // index name(s)
- alias string // alias name
-}
-
-// NewAliasRemoveAction returns an action to remove an alias.
-func NewAliasRemoveAction(alias string) *AliasRemoveAction {
- return &AliasRemoveAction{
- alias: alias,
- }
-}
-
-// Index associates one or more indices to the alias.
-func (a *AliasRemoveAction) Index(index ...string) *AliasRemoveAction {
- a.index = append(a.index, index...)
- return a
-}
-
-func (a *AliasRemoveAction) removeBlankIndexNames() {
- var indices []string
- for _, index := range a.index {
- if len(index) > 0 {
- indices = append(indices, index)
- }
- }
- a.index = indices
-}
-
-// Validate checks if the operation is valid.
-func (a *AliasRemoveAction) Validate() error {
- var invalid []string
- if len(a.alias) == 0 {
- invalid = append(invalid, "Alias")
- }
- if len(a.index) == 0 {
- invalid = append(invalid, "Index")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Source returns the JSON-serializable data.
-func (a *AliasRemoveAction) Source() (interface{}, error) {
- a.removeBlankIndexNames()
- if err := a.Validate(); err != nil {
- return nil, err
- }
- src := make(map[string]interface{})
- act := make(map[string]interface{})
- src["remove"] = act
- act["alias"] = a.alias
- switch len(a.index) {
- case 1:
- act["index"] = a.index[0]
- default:
- act["indices"] = a.index
- }
- return src, nil
-}
-
-// -- Service --
-
-// AliasService enables users to add or remove an alias.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-aliases.html
-// for details.
-type AliasService struct {
- client *Client
- actions []AliasAction
- pretty bool
-}
-
-// NewAliasService creates a new AliasService to manage aliases.
-func NewAliasService(client *Client) *AliasService {
- builder := &AliasService{
- client: client,
- }
- return builder
-}
-
-// Pretty asks Elasticsearch to indent the HTTP response.
-func (s *AliasService) Pretty(pretty bool) *AliasService {
- s.pretty = pretty
- return s
-}
-
-// Add adds an alias to an index.
-func (s *AliasService) Add(indexName string, aliasName string) *AliasService {
- action := NewAliasAddAction(aliasName).Index(indexName)
- s.actions = append(s.actions, action)
- return s
-}
-
-// AddWithFilter adds an alias to an index and associates a filter with the alias.
-func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter Query) *AliasService {
- action := NewAliasAddAction(aliasName).Index(indexName).Filter(filter)
- s.actions = append(s.actions, action)
- return s
-}
-
-// Remove removes an alias.
-func (s *AliasService) Remove(indexName string, aliasName string) *AliasService {
- action := NewAliasRemoveAction(aliasName).Index(indexName)
- s.actions = append(s.actions, action)
- return s
-}
-
-// Action accepts one or more AliasAction instances which can be
-// of type AliasAddAction or AliasRemoveAction.
-func (s *AliasService) Action(action ...AliasAction) *AliasService {
- s.actions = append(s.actions, action...)
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *AliasService) buildURL() (string, url.Values, error) {
- path := "/_aliases"
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", fmt.Sprintf("%v", s.pretty))
- }
- return path, params, nil
-}
-
-// Do executes the command.
-func (s *AliasService) Do(ctx context.Context) (*AliasResult, error) {
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Body with actions
- body := make(map[string]interface{})
- var actions []interface{}
- for _, action := range s.actions {
- src, err := action.Source()
- if err != nil {
- return nil, err
- }
- actions = append(actions, src)
- }
- body["actions"] = actions
-
- // Get response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return results
- ret := new(AliasResult)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// -- Result of an alias request.
-
-// AliasResult is the outcome of calling Do on AliasService.
-type AliasResult struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
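A compact sketch of the alias API deleted above, swapping an alias atomically from one index to another; it assumes a configured client and mirrors the Add/Remove/Action calls used in the test file that follows. The function and parameter names are hypothetical.

package example

import (
	"context"

	"github.com/olivere/elastic"
)

// swapAlias removes alias from oldIndex and adds it to newIndex in a
// single _aliases request, so readers never see the alias undefined.
func swapAlias(ctx context.Context, client *elastic.Client, alias, oldIndex, newIndex string) error {
	_, err := client.Alias().
		Action(
			elastic.NewAliasRemoveAction(alias).Index(oldIndex),
			elastic.NewAliasAddAction(alias).Index(newIndex),
		).
		Do(ctx)
	return err
}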
diff --git a/vendor/github.com/olivere/elastic/indices_put_alias_test.go b/vendor/github.com/olivere/elastic/indices_put_alias_test.go
deleted file mode 100644
index ada1dfdef..000000000
--- a/vendor/github.com/olivere/elastic/indices_put_alias_test.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "testing"
-)
-
-const (
- testAliasName = "elastic-test-alias"
-)
-
-func TestAliasLifecycle(t *testing.T) {
- var err error
-
- client := setupTestClientAndCreateIndex(t)
-
- // Some tweets
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
- tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
-
- // Add tweets to first index
- _, err = client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Add tweets to second index
- _, err = client.Index().Index(testIndexName2).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Flush
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Flush().Index(testIndexName2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Add both indices to a new alias
- aliasCreate, err := client.Alias().
- Add(testIndexName, testAliasName).
- Action(NewAliasAddAction(testAliasName).Index(testIndexName2)).
- //Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !aliasCreate.Acknowledged {
- t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
- }
-
- // Search should return all 3 tweets
- matchAll := NewMatchAllQuery()
- searchResult1, err := client.Search().Index(testAliasName).Query(matchAll).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult1.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult1.Hits.TotalHits != 3 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult1.Hits.TotalHits)
- }
-
- // Remove first index should remove two tweets, so should only yield 1
- aliasRemove1, err := client.Alias().
- Remove(testIndexName, testAliasName).
- //Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if !aliasRemove1.Acknowledged {
- t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
- }
-
- searchResult2, err := client.Search().Index(testAliasName).Query(matchAll).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult2.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult2.Hits.TotalHits != 1 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult2.Hits.TotalHits)
- }
-}
-
-func TestAliasAddAction(t *testing.T) {
- var tests = []struct {
- Action *AliasAddAction
- Expected string
- Invalid bool
- }{
- {
- Action: NewAliasAddAction("").Index(""),
- Invalid: true,
- },
- {
- Action: NewAliasAddAction("alias1").Index(""),
- Invalid: true,
- },
- {
- Action: NewAliasAddAction("").Index("index1"),
- Invalid: true,
- },
- {
- Action: NewAliasAddAction("alias1").Index("index1"),
- Expected: `{"add":{"alias":"alias1","index":"index1"}}`,
- },
- {
- Action: NewAliasAddAction("alias1").Index("index1", "index2"),
- Expected: `{"add":{"alias":"alias1","indices":["index1","index2"]}}`,
- },
- {
- Action: NewAliasAddAction("alias1").Index("index1").Routing("routing1"),
- Expected: `{"add":{"alias":"alias1","index":"index1","routing":"routing1"}}`,
- },
- {
- Action: NewAliasAddAction("alias1").Index("index1").Routing("routing1").IndexRouting("indexRouting1"),
- Expected: `{"add":{"alias":"alias1","index":"index1","index_routing":"indexRouting1","routing":"routing1"}}`,
- },
- {
- Action: NewAliasAddAction("alias1").Index("index1").Routing("routing1").SearchRouting("searchRouting1"),
- Expected: `{"add":{"alias":"alias1","index":"index1","routing":"routing1","search_routing":"searchRouting1"}}`,
- },
- {
- Action: NewAliasAddAction("alias1").Index("index1").Routing("routing1").SearchRouting("searchRouting1", "searchRouting2"),
- Expected: `{"add":{"alias":"alias1","index":"index1","routing":"routing1","search_routing":"searchRouting1,searchRouting2"}}`,
- },
- {
- Action: NewAliasAddAction("alias1").Index("index1").Filter(NewTermQuery("user", "olivere")),
- Expected: `{"add":{"alias":"alias1","filter":{"term":{"user":"olivere"}},"index":"index1"}}`,
- },
- }
-
- for i, tt := range tests {
- src, err := tt.Action.Source()
- if err != nil {
- if !tt.Invalid {
- t.Errorf("#%d: expected to succeed", i)
- }
- } else {
- if tt.Invalid {
- t.Errorf("#%d: expected to fail", i)
- } else {
- dst, err := json.Marshal(src)
- if err != nil {
- t.Fatal(err)
- }
- if want, have := tt.Expected, string(dst); want != have {
- t.Errorf("#%d: expected %s, got %s", i, want, have)
- }
- }
- }
- }
-}
-
-func TestAliasRemoveAction(t *testing.T) {
- var tests = []struct {
- Action *AliasRemoveAction
- Expected string
- Invalid bool
- }{
- {
- Action: NewAliasRemoveAction(""),
- Invalid: true,
- },
- {
- Action: NewAliasRemoveAction("alias1"),
- Invalid: true,
- },
- {
- Action: NewAliasRemoveAction("").Index("index1"),
- Invalid: true,
- },
- {
- Action: NewAliasRemoveAction("alias1").Index("index1"),
- Expected: `{"remove":{"alias":"alias1","index":"index1"}}`,
- },
- {
- Action: NewAliasRemoveAction("alias1").Index("index1", "index2"),
- Expected: `{"remove":{"alias":"alias1","indices":["index1","index2"]}}`,
- },
- }
-
- for i, tt := range tests {
- src, err := tt.Action.Source()
- if err != nil {
- if !tt.Invalid {
- t.Errorf("#%d: expected to succeed", i)
- }
- } else {
- if tt.Invalid {
- t.Errorf("#%d: expected to fail", i)
- } else {
- dst, err := json.Marshal(src)
- if err != nil {
- t.Fatal(err)
- }
- if want, have := tt.Expected, string(dst); want != have {
- t.Errorf("#%d: expected %s, got %s", i, want, have)
- }
- }
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_put_mapping.go b/vendor/github.com/olivere/elastic/indices_put_mapping.go
deleted file mode 100644
index 2f8a35e4c..000000000
--- a/vendor/github.com/olivere/elastic/indices_put_mapping.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesPutMappingService registers a specific mapping definition
-// for a specific type.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-put-mapping.html
-// for details.
-type IndicesPutMappingService struct {
- client *Client
- pretty bool
- typ string
- index []string
- masterTimeout string
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
- updateAllTypes *bool
- timeout string
- bodyJson map[string]interface{}
- bodyString string
-}
-
-// NewPutMappingService is an alias for NewIndicesPutMappingService.
-// Use NewIndicesPutMappingService.
-func NewPutMappingService(client *Client) *IndicesPutMappingService {
- return NewIndicesPutMappingService(client)
-}
-
-// NewIndicesPutMappingService creates a new IndicesPutMappingService.
-func NewIndicesPutMappingService(client *Client) *IndicesPutMappingService {
- return &IndicesPutMappingService{
- client: client,
- index: make([]string, 0),
- }
-}
-
-// Index is a list of index names the mapping should be added to
-// (supports wildcards); use `_all` or omit to add the mapping on all indices.
-func (s *IndicesPutMappingService) Index(indices ...string) *IndicesPutMappingService {
- s.index = append(s.index, indices...)
- return s
-}
-
-// Type is the name of the document type.
-func (s *IndicesPutMappingService) Type(typ string) *IndicesPutMappingService {
- s.typ = typ
- return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *IndicesPutMappingService) Timeout(timeout string) *IndicesPutMappingService {
- s.timeout = timeout
- return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesPutMappingService) MasterTimeout(masterTimeout string) *IndicesPutMappingService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesPutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutMappingService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// This includes `_all` string or when no indices have been specified.
-func (s *IndicesPutMappingService) AllowNoIndices(allowNoIndices bool) *IndicesPutMappingService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *IndicesPutMappingService) ExpandWildcards(expandWildcards string) *IndicesPutMappingService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// UpdateAllTypes, if true, indicates that all fields that span multiple indices
-// should be updated (default: false).
-func (s *IndicesPutMappingService) UpdateAllTypes(updateAllTypes bool) *IndicesPutMappingService {
- s.updateAllTypes = &updateAllTypes
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesPutMappingService) Pretty(pretty bool) *IndicesPutMappingService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson contains the mapping definition.
-func (s *IndicesPutMappingService) BodyJson(mapping map[string]interface{}) *IndicesPutMappingService {
- s.bodyJson = mapping
- return s
-}
-
-// BodyString is the mapping definition serialized as a string.
-func (s *IndicesPutMappingService) BodyString(mapping string) *IndicesPutMappingService {
- s.bodyString = mapping
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesPutMappingService) buildURL() (string, url.Values, error) {
- var err error
- var path string
-
- // Build URL: Typ MUST be specified and is verified in Validate.
- if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
- "index": strings.Join(s.index, ","),
- "type": s.typ,
- })
- } else {
- path, err = uritemplates.Expand("/_mapping/{type}", map[string]string{
- "type": s.typ,
- })
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.updateAllTypes != nil {
- params.Set("update_all_types", fmt.Sprintf("%v", *s.updateAllTypes))
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesPutMappingService) Validate() error {
- var invalid []string
- if s.typ == "" {
- invalid = append(invalid, "Type")
- }
- if s.bodyString == "" && s.bodyJson == nil {
- invalid = append(invalid, "BodyJson")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesPutMappingService) Do(ctx context.Context) (*PutMappingResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "PUT",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(PutMappingResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// PutMappingResponse is the response of IndicesPutMappingService.Do.
-type PutMappingResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
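A small sketch of registering a mapping with the service deleted above; the type name "doc" and the single keyword property are illustrative and follow the shape used in the test file below. A configured client is assumed.

package example

import (
	"context"

	"github.com/olivere/elastic"
)

// putKeywordField adds a "field" keyword property to type "doc" of the
// given index and reports whether the change was acknowledged.
func putKeywordField(ctx context.Context, client *elastic.Client, index string) (bool, error) {
	mapping := map[string]interface{}{
		"doc": map[string]interface{}{
			"properties": map[string]interface{}{
				"field": map[string]interface{}{"type": "keyword"},
			},
		},
	}
	res, err := client.PutMapping().
		Index(index).
		Type("doc").
		BodyJson(mapping).
		Do(ctx)
	if err != nil {
		return false, err
	}
	return res.Acknowledged, nil
}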
diff --git a/vendor/github.com/olivere/elastic/indices_put_mapping_test.go b/vendor/github.com/olivere/elastic/indices_put_mapping_test.go
deleted file mode 100644
index 644e1187a..000000000
--- a/vendor/github.com/olivere/elastic/indices_put_mapping_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestPutMappingURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Indices []string
- Type string
- Expected string
- }{
- {
- []string{},
- "doc",
- "/_mapping/doc",
- },
- {
- []string{"*"},
- "doc",
- "/%2A/_mapping/doc",
- },
- {
- []string{"store-1", "store-2"},
- "doc",
- "/store-1%2Cstore-2/_mapping/doc",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.PutMapping().Index(test.Indices...).Type(test.Type).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
-
-func TestMappingLifecycle(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
- //client := setupTestClientAndCreateIndexAndLog(t)
-
- // Create index
- createIndex, err := client.CreateIndex(testIndexName3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if createIndex == nil {
- t.Errorf("expected result to be != nil; got: %v", createIndex)
- }
-
- mapping := `{
- "doc":{
- "properties":{
- "field":{
- "type":"keyword"
- }
- }
- }
- }`
-
- putresp, err := client.PutMapping().Index(testIndexName3).Type("doc").BodyString(mapping).Do(context.TODO())
- if err != nil {
- t.Fatalf("expected put mapping to succeed; got: %v", err)
- }
- if putresp == nil {
- t.Fatalf("expected put mapping response; got: %v", putresp)
- }
- if !putresp.Acknowledged {
- t.Fatalf("expected put mapping ack; got: %v", putresp.Acknowledged)
- }
-
- getresp, err := client.GetMapping().Index(testIndexName3).Type("doc").Do(context.TODO())
- if err != nil {
- t.Fatalf("expected get mapping to succeed; got: %v", err)
- }
- if getresp == nil {
- t.Fatalf("expected get mapping response; got: %v", getresp)
- }
- props, ok := getresp[testIndexName3]
- if !ok {
- t.Fatalf("expected JSON root to be of type map[string]interface{}; got: %#v", props)
- }
-
- // NOTE There is no Delete Mapping API in Elasticsearch 2.0
-}
diff --git a/vendor/github.com/olivere/elastic/indices_put_settings.go b/vendor/github.com/olivere/elastic/indices_put_settings.go
deleted file mode 100644
index 1283eb669..000000000
--- a/vendor/github.com/olivere/elastic/indices_put_settings.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesPutSettingsService changes specific index level settings in
-// real time.
-//
-// See the documentation at
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-update-settings.html.
-type IndicesPutSettingsService struct {
- client *Client
- pretty bool
- index []string
- allowNoIndices *bool
- expandWildcards string
- flatSettings *bool
- ignoreUnavailable *bool
- masterTimeout string
- bodyJson interface{}
- bodyString string
-}
-
-// NewIndicesPutSettingsService creates a new IndicesPutSettingsService.
-func NewIndicesPutSettingsService(client *Client) *IndicesPutSettingsService {
- return &IndicesPutSettingsService{
- client: client,
- index: make([]string, 0),
- }
-}
-
-// Index is a list of index names the mapping should be added to
-// (supports wildcards); use `_all` or omit to add the mapping on all indices.
-func (s *IndicesPutSettingsService) Index(indices ...string) *IndicesPutSettingsService {
- s.index = append(s.index, indices...)
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices. (This includes `_all`
-// string or when no indices have been specified).
-func (s *IndicesPutSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesPutSettingsService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards specifies whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *IndicesPutSettingsService) ExpandWildcards(expandWildcards string) *IndicesPutSettingsService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// FlatSettings indicates whether to return settings in flat format (default: false).
-func (s *IndicesPutSettingsService) FlatSettings(flatSettings bool) *IndicesPutSettingsService {
- s.flatSettings = &flatSettings
- return s
-}
-
-// IgnoreUnavailable specifies whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesPutSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutSettingsService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// MasterTimeout is the timeout for connection to master.
-func (s *IndicesPutSettingsService) MasterTimeout(masterTimeout string) *IndicesPutSettingsService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesPutSettingsService) Pretty(pretty bool) *IndicesPutSettingsService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson is documented as: The index settings to be updated.
-func (s *IndicesPutSettingsService) BodyJson(body interface{}) *IndicesPutSettingsService {
- s.bodyJson = body
- return s
-}
-
-// BodyString is documented as: The index settings to be updated.
-func (s *IndicesPutSettingsService) BodyString(body string) *IndicesPutSettingsService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesPutSettingsService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
-
- if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- } else {
- path = "/_settings"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.flatSettings != nil {
- params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesPutSettingsService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesPutSettingsService) Do(ctx context.Context) (*IndicesPutSettingsResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "PUT",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndicesPutSettingsResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesPutSettingsResponse is the response of IndicesPutSettingsService.Do.
-type IndicesPutSettingsResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
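A minimal sketch of the dynamic settings update service deleted above, assuming a configured client; disabling the refresh interval before a bulk load is a common use and mirrors the body used in the test file that follows.

package example

import (
	"context"

	"github.com/olivere/elastic"
)

// disableRefresh sets refresh_interval to -1 on the given index, which
// is often done before a large bulk import and reverted afterwards.
func disableRefresh(ctx context.Context, client *elastic.Client, index string) (bool, error) {
	res, err := client.IndexPutSettings().
		Index(index).
		BodyJson(map[string]interface{}{
			"index": map[string]interface{}{
				"refresh_interval": "-1",
			},
		}).
		Do(ctx)
	if err != nil {
		return false, err
	}
	return res.Acknowledged, nil
}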
diff --git a/vendor/github.com/olivere/elastic/indices_put_settings_test.go b/vendor/github.com/olivere/elastic/indices_put_settings_test.go
deleted file mode 100644
index 0ceea3ef8..000000000
--- a/vendor/github.com/olivere/elastic/indices_put_settings_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndicesPutSettingsBuildURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Indices []string
- Expected string
- }{
- {
- []string{},
- "/_settings",
- },
- {
- []string{"*"},
- "/%2A/_settings",
- },
- {
- []string{"store-1", "store-2"},
- "/store-1%2Cstore-2/_settings",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.IndexPutSettings().Index(test.Indices...).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
-
-func TestIndicesSettingsLifecycle(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- body := `{
- "index":{
- "refresh_interval":"-1"
- }
- }`
-
- // Put settings
- putres, err := client.IndexPutSettings().Index(testIndexName).BodyString(body).Do(context.TODO())
- if err != nil {
- t.Fatalf("expected put settings to succeed; got: %v", err)
- }
- if putres == nil {
- t.Fatalf("expected put settings response; got: %v", putres)
- }
- if !putres.Acknowledged {
- t.Fatalf("expected put settings ack; got: %v", putres.Acknowledged)
- }
-
- // Read settings
- getres, err := client.IndexGetSettings().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatalf("expected get mapping to succeed; got: %v", err)
- }
- if getres == nil {
- t.Fatalf("expected get mapping response; got: %v", getres)
- }
-
- // Check settings
- index, found := getres[testIndexName]
- if !found {
- t.Fatalf("expected to return settings for index %q; got: %#v", testIndexName, getres)
- }
- // Retrieve "index" section of the settings for index testIndexName
- sectionIntf, ok := index.Settings["index"]
- if !ok {
- t.Fatalf("expected settings to have %q field; got: %#v", "index", getres)
- }
- section, ok := sectionIntf.(map[string]interface{})
- if !ok {
- t.Fatalf("expected settings to be of type map[string]interface{}; got: %#v", getres)
- }
- refintv, ok := section["refresh_interval"]
- if !ok {
- t.Fatalf(`expected JSON to include "refresh_interval" field; got: %#v`, getres)
- }
- if got, want := refintv, "-1"; got != want {
- t.Fatalf("expected refresh_interval = %v; got: %v", want, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_put_template.go b/vendor/github.com/olivere/elastic/indices_put_template.go
deleted file mode 100644
index c0b959647..000000000
--- a/vendor/github.com/olivere/elastic/indices_put_template.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesPutTemplateService creates or updates index templates.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-templates.html.
-type IndicesPutTemplateService struct {
- client *Client
- pretty bool
- name string
- cause string
- order interface{}
- version *int
- create *bool
- timeout string
- masterTimeout string
- flatSettings *bool
- bodyJson interface{}
- bodyString string
-}
-
-// NewIndicesPutTemplateService creates a new IndicesPutTemplateService.
-func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService {
- return &IndicesPutTemplateService{
- client: client,
- }
-}
-
-// Name is the name of the index template.
-func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService {
- s.name = name
- return s
-}
-
-// Cause describes the cause for this index template creation. This is currently
-// undocumented, but part of the Java source.
-func (s *IndicesPutTemplateService) Cause(cause string) *IndicesPutTemplateService {
- s.cause = cause
- return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService {
- s.timeout = timeout
- return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// FlatSettings indicates whether to return settings in flat format (default: false).
-func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService {
- s.flatSettings = &flatSettings
- return s
-}
-
-// Order is the order for this template when merging multiple matching ones
-// (higher numbers are merged later, overriding the lower numbers).
-func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService {
- s.order = order
- return s
-}
-
-// Version sets the version number for this template.
-func (s *IndicesPutTemplateService) Version(version int) *IndicesPutTemplateService {
- s.version = &version
- return s
-}
-
-// Create indicates whether the index template should only be added if
-// new or can also replace an existing one.
-func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService {
- s.create = &create
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson is documented as: The template definition.
-func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService {
- s.bodyJson = body
- return s
-}
-
-// BodyString is documented as: The template definition.
-func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_template/{name}", map[string]string{
- "name": s.name,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.order != nil {
- params.Set("order", fmt.Sprintf("%v", s.order))
- }
- if s.version != nil {
- params.Set("version", fmt.Sprintf("%v", *s.version))
- }
- if s.create != nil {
- params.Set("create", fmt.Sprintf("%v", *s.create))
- }
- if s.cause != "" {
- params.Set("cause", s.cause)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.flatSettings != nil {
- params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesPutTemplateService) Validate() error {
- var invalid []string
- if s.name == "" {
- invalid = append(invalid, "Name")
- }
- if s.bodyString == "" && s.bodyJson == nil {
- invalid = append(invalid, "BodyJson")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesPutTemplateService) Do(ctx context.Context) (*IndicesPutTemplateResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "PUT",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndicesPutTemplateResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do.
-type IndicesPutTemplateResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
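
For reviewers of this removal, here is a minimal, self-contained usage sketch of the template service deleted above. It is a sketch only: it assumes an Elasticsearch node at the client's default local address, a hypothetical template name "tweets-template", and an illustrative template body.

package main

import (
	"context"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// Assumption: a node is reachable at the default URL (http://127.0.0.1:9200).
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical template body: one primary shard for indices matching tweets-*.
	const tmpl = `{"index_patterns":["tweets-*"],"settings":{"number_of_shards":1}}`

	res, err := elastic.NewIndicesPutTemplateService(client).
		Name("tweets-template"). // required; see Validate above
		BodyString(tmpl).        // required; BodyJson would work as well
		Create(false).           // allow replacing an existing template
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("template acknowledged: %v", res.Acknowledged)
}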
diff --git a/vendor/github.com/olivere/elastic/indices_refresh.go b/vendor/github.com/olivere/elastic/indices_refresh.go
deleted file mode 100644
index f6c7f165e..000000000
--- a/vendor/github.com/olivere/elastic/indices_refresh.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// RefreshService explicitly refreshes one or more indices.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-refresh.html.
-type RefreshService struct {
- client *Client
- index []string
- pretty bool
-}
-
-// NewRefreshService creates a new instance of RefreshService.
-func NewRefreshService(client *Client) *RefreshService {
- builder := &RefreshService{
- client: client,
- }
- return builder
-}
-
-// Index specifies the indices to refresh.
-func (s *RefreshService) Index(index ...string) *RefreshService {
- s.index = append(s.index, index...)
- return s
-}
-
-// Pretty asks Elasticsearch to return indented JSON.
-func (s *RefreshService) Pretty(pretty bool) *RefreshService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *RefreshService) buildURL() (string, url.Values, error) {
- var err error
- var path string
-
- if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_refresh", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- } else {
- path = "/_refresh"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", fmt.Sprintf("%v", s.pretty))
- }
- return path, params, nil
-}
-
-// Do executes the request.
-func (s *RefreshService) Do(ctx context.Context) (*RefreshResult, error) {
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return result
- ret := new(RefreshResult)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// -- Result of a refresh request.
-
-// RefreshResult is the outcome of RefreshService.Do.
-type RefreshResult struct {
- Shards shardsInfo `json:"_shards,omitempty"`
-}
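
A brief sketch of the refresh service deleted above, written as a helper function. It assumes an already configured *elastic.Client, a hypothetical index name "tweets", and the same imports as the full example earlier; the deleted test below exercises the same path end to end.

// refreshTweets forces a refresh so freshly indexed documents in the
// hypothetical "tweets" index become visible to searches.
func refreshTweets(ctx context.Context, client *elastic.Client) error {
	res, err := client.Refresh("tweets").Do(ctx)
	if err != nil {
		return err
	}
	log.Printf("refresh: %d of %d shards successful",
		res.Shards.Successful, res.Shards.Total)
	return nil
}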
diff --git a/vendor/github.com/olivere/elastic/indices_refresh_test.go b/vendor/github.com/olivere/elastic/indices_refresh_test.go
deleted file mode 100644
index 8640fb602..000000000
--- a/vendor/github.com/olivere/elastic/indices_refresh_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestRefreshBuildURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Indices []string
- Expected string
- }{
- {
- []string{},
- "/_refresh",
- },
- {
- []string{"index1"},
- "/index1/_refresh",
- },
- {
- []string{"index1", "index2"},
- "/index1%2Cindex2/_refresh",
- },
- }
-
- for i, test := range tests {
- path, _, err := client.Refresh().Index(test.Indices...).buildURL()
- if err != nil {
- t.Errorf("case #%d: %v", i+1, err)
- continue
- }
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
-}
-
-func TestRefresh(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add some documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Refresh indices
- res, err := client.Refresh(testIndexName, testIndexName2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected result; got nil")
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_rollover.go b/vendor/github.com/olivere/elastic/indices_rollover.go
deleted file mode 100644
index 841b3836f..000000000
--- a/vendor/github.com/olivere/elastic/indices_rollover.go
+++ /dev/null
@@ -1,272 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesRolloverService rolls an alias over to a new index when the
-// existing index is considered to be too large or too old.
-//
-// It is documented at
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-rollover-index.html.
-type IndicesRolloverService struct {
- client *Client
- pretty bool
- dryRun bool
- newIndex string
- alias string
- masterTimeout string
- timeout string
- waitForActiveShards string
- conditions map[string]interface{}
- settings map[string]interface{}
- mappings map[string]interface{}
- bodyJson interface{}
- bodyString string
-}
-
-// NewIndicesRolloverService creates a new IndicesRolloverService.
-func NewIndicesRolloverService(client *Client) *IndicesRolloverService {
- return &IndicesRolloverService{
- client: client,
- conditions: make(map[string]interface{}),
- settings: make(map[string]interface{}),
- mappings: make(map[string]interface{}),
- }
-}
-
-// Alias is the name of the alias to rollover.
-func (s *IndicesRolloverService) Alias(alias string) *IndicesRolloverService {
- s.alias = alias
- return s
-}
-
-// NewIndex is the name of the rollover index.
-func (s *IndicesRolloverService) NewIndex(newIndex string) *IndicesRolloverService {
- s.newIndex = newIndex
- return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesRolloverService) MasterTimeout(masterTimeout string) *IndicesRolloverService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Timeout sets an explicit operation timeout.
-func (s *IndicesRolloverService) Timeout(timeout string) *IndicesRolloverService {
- s.timeout = timeout
- return s
-}
-
-// WaitForActiveShards sets the number of active shards to wait for on the
-// newly created rollover index before the operation returns.
-func (s *IndicesRolloverService) WaitForActiveShards(waitForActiveShards string) *IndicesRolloverService {
- s.waitForActiveShards = waitForActiveShards
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesRolloverService) Pretty(pretty bool) *IndicesRolloverService {
- s.pretty = pretty
- return s
-}
-
-// DryRun, when set, specifies that only conditions are checked without
-// performing the actual rollover.
-func (s *IndicesRolloverService) DryRun(dryRun bool) *IndicesRolloverService {
- s.dryRun = dryRun
- return s
-}
-
-// Conditions allows specifying all conditions as a dictionary.
-func (s *IndicesRolloverService) Conditions(conditions map[string]interface{}) *IndicesRolloverService {
- s.conditions = conditions
- return s
-}
-
-// AddCondition adds a condition to the rollover decision.
-func (s *IndicesRolloverService) AddCondition(name string, value interface{}) *IndicesRolloverService {
- s.conditions[name] = value
- return s
-}
-
-// AddMaxIndexAgeCondition adds a condition to set the max index age.
-func (s *IndicesRolloverService) AddMaxIndexAgeCondition(time string) *IndicesRolloverService {
- s.conditions["max_age"] = time
- return s
-}
-
-// AddMaxIndexDocsCondition adds a condition to set the max documents in the index.
-func (s *IndicesRolloverService) AddMaxIndexDocsCondition(docs int64) *IndicesRolloverService {
- s.conditions["max_docs"] = docs
- return s
-}
-
-// Settings adds the index settings.
-func (s *IndicesRolloverService) Settings(settings map[string]interface{}) *IndicesRolloverService {
- s.settings = settings
- return s
-}
-
-// AddSetting adds an index setting.
-func (s *IndicesRolloverService) AddSetting(name string, value interface{}) *IndicesRolloverService {
- s.settings[name] = value
- return s
-}
-
-// Mappings adds the index mappings.
-func (s *IndicesRolloverService) Mappings(mappings map[string]interface{}) *IndicesRolloverService {
- s.mappings = mappings
- return s
-}
-
-// AddMapping adds a mapping for the given type.
-func (s *IndicesRolloverService) AddMapping(typ string, mapping interface{}) *IndicesRolloverService {
- s.mappings[typ] = mapping
- return s
-}
-
-// BodyJson sets the conditions that need to be met for executing rollover,
-// specified as a serializable JSON instance which is sent as the body of
-// the request.
-func (s *IndicesRolloverService) BodyJson(body interface{}) *IndicesRolloverService {
- s.bodyJson = body
- return s
-}
-
-// BodyString sets the conditions that need to be met for executing rollover,
-// specified as a string which is sent as the body of the request.
-func (s *IndicesRolloverService) BodyString(body string) *IndicesRolloverService {
- s.bodyString = body
- return s
-}
-
-// getBody returns the body of the request, if not explicitly set via
-// BodyJson or BodyString.
-func (s *IndicesRolloverService) getBody() interface{} {
- body := make(map[string]interface{})
- if len(s.conditions) > 0 {
- body["conditions"] = s.conditions
- }
- if len(s.settings) > 0 {
- body["settings"] = s.settings
- }
- if len(s.mappings) > 0 {
- body["mappings"] = s.mappings
- }
- return body
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesRolloverService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
- if s.newIndex != "" {
- path, err = uritemplates.Expand("/{alias}/_rollover/{new_index}", map[string]string{
- "alias": s.alias,
- "new_index": s.newIndex,
- })
- } else {
- path, err = uritemplates.Expand("/{alias}/_rollover", map[string]string{
- "alias": s.alias,
- })
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.dryRun {
- params.Set("dry_run", "true")
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.waitForActiveShards != "" {
- params.Set("wait_for_active_shards", s.waitForActiveShards)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesRolloverService) Validate() error {
- var invalid []string
- if s.alias == "" {
- invalid = append(invalid, "Alias")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesRolloverService) Do(ctx context.Context) (*IndicesRolloverResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else if s.bodyString != "" {
- body = s.bodyString
- } else {
- body = s.getBody()
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndicesRolloverResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesRolloverResponse is the response of IndicesRolloverService.Do.
-type IndicesRolloverResponse struct {
- OldIndex string `json:"old_index"`
- NewIndex string `json:"new_index"`
- RolledOver bool `json:"rolled_over"`
- DryRun bool `json:"dry_run"`
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Conditions map[string]bool `json:"conditions"`
-}
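
A minimal sketch of the rollover service deleted above, assuming an existing *elastic.Client, a hypothetical write alias "logs_write", and the imports from the earlier example; the condition and setting values are illustrative only.

// rolloverLogs rolls the "logs_write" alias over to a new index once the
// current one is older than 7 days or holds one million documents.
func rolloverLogs(ctx context.Context, client *elastic.Client) error {
	res, err := client.RolloverIndex("logs_write").
		AddMaxIndexAgeCondition("7d").
		AddMaxIndexDocsCondition(1000000).
		AddSetting("index.number_of_shards", 2).
		Do(ctx)
	if err != nil {
		return err
	}
	log.Printf("rollover %s -> %s (rolled_over=%v, dry_run=%v)",
		res.OldIndex, res.NewIndex, res.RolledOver, res.DryRun)
	return nil
}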
diff --git a/vendor/github.com/olivere/elastic/indices_rollover_test.go b/vendor/github.com/olivere/elastic/indices_rollover_test.go
deleted file mode 100644
index 81d7099e0..000000000
--- a/vendor/github.com/olivere/elastic/indices_rollover_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestIndicesRolloverBuildURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Alias string
- NewIndex string
- Expected string
- }{
- {
- "logs_write",
- "",
- "/logs_write/_rollover",
- },
- {
- "logs_write",
- "my_new_index_name",
- "/logs_write/_rollover/my_new_index_name",
- },
- }
-
- for i, test := range tests {
- path, _, err := client.RolloverIndex(test.Alias).NewIndex(test.NewIndex).buildURL()
- if err != nil {
- t.Errorf("case #%d: %v", i+1, err)
- continue
- }
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
-}
-
-func TestIndicesRolloverBodyConditions(t *testing.T) {
- client := setupTestClient(t)
- svc := NewIndicesRolloverService(client).
- Conditions(map[string]interface{}{
- "max_age": "7d",
- "max_docs": 1000,
- })
- data, err := json.Marshal(svc.getBody())
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"conditions":{"max_age":"7d","max_docs":1000}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestIndicesRolloverBodyAddCondition(t *testing.T) {
- client := setupTestClient(t)
- svc := NewIndicesRolloverService(client).
- AddCondition("max_age", "7d").
- AddCondition("max_docs", 1000)
- data, err := json.Marshal(svc.getBody())
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"conditions":{"max_age":"7d","max_docs":1000}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestIndicesRolloverBodyAddPredefinedConditions(t *testing.T) {
- client := setupTestClient(t)
- svc := NewIndicesRolloverService(client).
- AddMaxIndexAgeCondition("2d").
- AddMaxIndexDocsCondition(1000000)
- data, err := json.Marshal(svc.getBody())
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"conditions":{"max_age":"2d","max_docs":1000000}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestIndicesRolloverBodyComplex(t *testing.T) {
- client := setupTestClient(t)
- svc := NewIndicesRolloverService(client).
- AddMaxIndexAgeCondition("2d").
- AddMaxIndexDocsCondition(1000000).
- AddSetting("index.number_of_shards", 2).
- AddMapping("doc", map[string]interface{}{
- "properties": map[string]interface{}{
- "user": map[string]interface{}{
- "type": "keyword",
- },
- },
- })
- data, err := json.Marshal(svc.getBody())
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"conditions":{"max_age":"2d","max_docs":1000000},"mappings":{"doc":{"properties":{"user":{"type":"keyword"}}}},"settings":{"index.number_of_shards":2}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_segments.go b/vendor/github.com/olivere/elastic/indices_segments.go
deleted file mode 100644
index 133d1101e..000000000
--- a/vendor/github.com/olivere/elastic/indices_segments.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesSegmentsService provides low-level information about the Lucene
-// segments that a shard-level index is built with. It can be used to get
-// more information on the state of a shard and an index, possibly
-// optimization information, data "wasted" on deletes, and so on.
-//
-// Find further documentation at
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.1/indices-segments.html.
-type IndicesSegmentsService struct {
- client *Client
- pretty bool
- index []string
- allowNoIndices *bool
- expandWildcards string
- ignoreUnavailable *bool
- human *bool
- operationThreading interface{}
- verbose *bool
-}
-
-// NewIndicesSegmentsService creates a new IndicesSegmentsService.
-func NewIndicesSegmentsService(client *Client) *IndicesSegmentsService {
- return &IndicesSegmentsService{
- client: client,
- }
-}
-
-// Index is a comma-separated list of index names; use `_all` or empty string
-// to perform the operation on all indices.
-func (s *IndicesSegmentsService) Index(indices ...string) *IndicesSegmentsService {
- s.index = append(s.index, indices...)
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices expression
-// resolves into no concrete indices. (This includes `_all` string or when
-// no indices have been specified).
-func (s *IndicesSegmentsService) AllowNoIndices(allowNoIndices bool) *IndicesSegmentsService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to concrete indices
-// that are open, closed or both.
-func (s *IndicesSegmentsService) ExpandWildcards(expandWildcards string) *IndicesSegmentsService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesSegmentsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesSegmentsService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// Human, when set to true, returns time and byte-values in human-readable format.
-func (s *IndicesSegmentsService) Human(human bool) *IndicesSegmentsService {
- s.human = &human
- return s
-}
-
-// OperationThreading is undocumented in Elasticsearch as of now.
-func (s *IndicesSegmentsService) OperationThreading(operationThreading interface{}) *IndicesSegmentsService {
- s.operationThreading = operationThreading
- return s
-}
-
-// Verbose, when set to true, includes detailed memory usage by Lucene.
-func (s *IndicesSegmentsService) Verbose(verbose bool) *IndicesSegmentsService {
- s.verbose = &verbose
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesSegmentsService) Pretty(pretty bool) *IndicesSegmentsService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesSegmentsService) buildURL() (string, url.Values, error) {
- var err error
- var path string
-
- if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_segments", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- } else {
- path = "/_segments"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.human != nil {
- params.Set("human", fmt.Sprintf("%v", *s.human))
- }
- if s.operationThreading != nil {
- params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading))
- }
- if s.verbose != nil {
- params.Set("verbose", fmt.Sprintf("%v", *s.verbose))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesSegmentsService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesSegmentsService) Do(ctx context.Context) (*IndicesSegmentsResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndicesSegmentsResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesSegmentsResponse is the response of IndicesSegmentsService.Do.
-type IndicesSegmentsResponse struct {
- // Shards provides information returned from shards.
- Shards shardsInfo `json:"_shards"`
-
- // Indices provides a map into the stats of an index.
- // The key of the map is the index name.
- Indices map[string]*IndexSegments `json:"indices,omitempty"`
-}
-
-type IndexSegments struct {
- // Shards provides a map into the shard related information of an index.
- // The key of the map is the number of a specific shard.
- Shards map[string][]*IndexSegmentsShards `json:"shards,omitempty"`
-}
-
-type IndexSegmentsShards struct {
- Routing *IndexSegmentsRouting `json:"routing,omitempty"`
- NumCommittedSegments int64 `json:"num_committed_segments,omitempty"`
- NumSearchSegments int64 `json:"num_search_segments"`
-
- // Segments provides a map into the segment related information of a shard.
- // The key of the map is the specific lucene segment id.
- Segments map[string]*IndexSegmentsDetails `json:"segments,omitempty"`
-}
-
-type IndexSegmentsRouting struct {
- State string `json:"state,omitempty"`
- Primary bool `json:"primary,omitempty"`
- Node string `json:"node,omitempty"`
- RelocatingNode string `json:"relocating_node,omitempty"`
-}
-
-type IndexSegmentsDetails struct {
- Generation int64 `json:"generation,omitempty"`
- NumDocs int64 `json:"num_docs,omitempty"`
- DeletedDocs int64 `json:"deleted_docs,omitempty"`
- Size string `json:"size,omitempty"`
- SizeInBytes int64 `json:"size_in_bytes,omitempty"`
- Memory string `json:"memory,omitempty"`
- MemoryInBytes int64 `json:"memory_in_bytes,omitempty"`
- Committed bool `json:"committed,omitempty"`
- Search bool `json:"search,omitempty"`
- Version string `json:"version,omitempty"`
- Compound bool `json:"compound,omitempty"`
- MergeId string `json:"merge_id,omitempty"`
- Sort []*IndexSegmentsSort `json:"sort,omitempty"`
- RAMTree []*IndexSegmentsRamTree `json:"ram_tree,omitempty"`
- Attributes map[string]string `json:"attributes,omitempty"`
-}
-
-type IndexSegmentsSort struct {
- Field string `json:"field,omitempty"`
- Mode string `json:"mode,omitempty"`
- Missing interface{} `json:"missing,omitempty"`
- Reverse bool `json:"reverse,omitempty"`
-}
-
-type IndexSegmentsRamTree struct {
- Description string `json:"description,omitempty"`
- Size string `json:"size,omitempty"`
- SizeInBytes int64 `json:"size_in_bytes,omitempty"`
- Children []*IndexSegmentsRamTree `json:"children,omitempty"`
-}
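
A short sketch of the segments service deleted above, assuming an existing *elastic.Client, a hypothetical index "tweets", and the imports from the earlier example; the deleted test below covers the same API against a live cluster.

// printSegmentCounts reports, per shard of the hypothetical "tweets" index,
// how many Lucene segments are committed and searchable.
func printSegmentCounts(ctx context.Context, client *elastic.Client) error {
	res, err := client.IndexSegments("tweets").Verbose(true).Do(ctx)
	if err != nil {
		return err
	}
	idx, found := res.Indices["tweets"]
	if !found {
		return nil // index not present in the response
	}
	for shardNum, copies := range idx.Shards {
		for _, shard := range copies {
			log.Printf("shard %s: committed=%d searchable=%d",
				shardNum, shard.NumCommittedSegments, shard.NumSearchSegments)
		}
	}
	return nil
}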
diff --git a/vendor/github.com/olivere/elastic/indices_segments_test.go b/vendor/github.com/olivere/elastic/indices_segments_test.go
deleted file mode 100644
index 2ec181cc1..000000000
--- a/vendor/github.com/olivere/elastic/indices_segments_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndicesSegments(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Indices []string
- Expected string
- }{
- {
- []string{},
- "/_segments",
- },
- {
- []string{"index1"},
- "/index1/_segments",
- },
- {
- []string{"index1", "index2"},
- "/index1%2Cindex2/_segments",
- },
- }
-
- for i, test := range tests {
- path, _, err := client.IndexSegments().Index(test.Indices...).buildURL()
- if err != nil {
- t.Errorf("case #%d: %v", i+1, err)
- }
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
-}
-
-func TestIndexSegments(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t)
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", 0)))
-
- segments, err := client.IndexSegments(testIndexName).Pretty(true).Human(true).Do(context.TODO())
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if segments == nil {
- t.Fatalf("expected response; got: %v", segments)
- }
- indices, found := segments.Indices[testIndexName]
- if !found {
- t.Fatalf("expected index information about index %v; got: %v", testIndexName, found)
- }
- shards, found := indices.Shards["0"]
- if !found {
- t.Fatalf("expected shard information about index %v", testIndexName)
- }
- if shards == nil {
- t.Fatalf("expected shard information to be != nil for index %v", testIndexName)
- }
- shard := shards[0]
- if shard == nil {
- t.Fatalf("expected shard information to be != nil for shard 0 in index %v", testIndexName)
- }
- if shard.Routing == nil {
- t.Fatalf("expected shard routing information to be != nil for index %v", testIndexName)
- }
- segmentDetail, found := shard.Segments["_0"]
- if !found {
- t.Fatalf("expected segment detail to be != nil for index %v", testIndexName)
- }
- if segmentDetail == nil {
- t.Fatalf("expected segment detail to be != nil for index %v", testIndexName)
- }
- if segmentDetail.NumDocs == 0 {
- t.Fatal("expected segment to contain >= 1 docs")
- }
- if len(segmentDetail.Attributes) == 0 {
- t.Fatalf("expected segment attributes map to contain at least one key, value pair for index %v", testIndexName)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_shrink.go b/vendor/github.com/olivere/elastic/indices_shrink.go
deleted file mode 100644
index 6ea72b281..000000000
--- a/vendor/github.com/olivere/elastic/indices_shrink.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesShrinkService allows you to shrink an existing index into a
-// new index with fewer primary shards.
-//
-// For further details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-shrink-index.html.
-type IndicesShrinkService struct {
- client *Client
- pretty bool
- source string
- target string
- masterTimeout string
- timeout string
- waitForActiveShards string
- bodyJson interface{}
- bodyString string
-}
-
-// NewIndicesShrinkService creates a new IndicesShrinkService.
-func NewIndicesShrinkService(client *Client) *IndicesShrinkService {
- return &IndicesShrinkService{
- client: client,
- }
-}
-
-// Source is the name of the source index to shrink.
-func (s *IndicesShrinkService) Source(source string) *IndicesShrinkService {
- s.source = source
- return s
-}
-
-// Target is the name of the target index to shrink into.
-func (s *IndicesShrinkService) Target(target string) *IndicesShrinkService {
- s.target = target
- return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesShrinkService) MasterTimeout(masterTimeout string) *IndicesShrinkService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *IndicesShrinkService) Timeout(timeout string) *IndicesShrinkService {
- s.timeout = timeout
- return s
-}
-
-// WaitForActiveShards sets the number of active shards to wait for on
-// the shrunken index before the operation returns.
-func (s *IndicesShrinkService) WaitForActiveShards(waitForActiveShards string) *IndicesShrinkService {
- s.waitForActiveShards = waitForActiveShards
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesShrinkService) Pretty(pretty bool) *IndicesShrinkService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson is the configuration for the target index (`settings` and `aliases`)
-// defined as a JSON-serializable instance to be sent as the request body.
-func (s *IndicesShrinkService) BodyJson(body interface{}) *IndicesShrinkService {
- s.bodyJson = body
- return s
-}
-
-// BodyString is the configuration for the target index (`settings` and `aliases`)
-// defined as a string to send as the request body.
-func (s *IndicesShrinkService) BodyString(body string) *IndicesShrinkService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesShrinkService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/{source}/_shrink/{target}", map[string]string{
- "source": s.source,
- "target": s.target,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.waitForActiveShards != "" {
- params.Set("wait_for_active_shards", s.waitForActiveShards)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesShrinkService) Validate() error {
- var invalid []string
- if s.source == "" {
- invalid = append(invalid, "Source")
- }
- if s.target == "" {
- invalid = append(invalid, "Target")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesShrinkService) Do(ctx context.Context) (*IndicesShrinkResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else if s.bodyString != "" {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndicesShrinkResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesShrinkResponse is the response of IndicesShrinkService.Do.
-type IndicesShrinkResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
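
A minimal sketch of the shrink service deleted above, assuming an existing *elastic.Client, hypothetical source and target index names, and the imports from the earlier example. Note that Elasticsearch also expects the source index to be read-only with all shards available on a single node before shrinking; that preparation is not shown here.

// shrinkLogs shrinks the hypothetical "logs-large" index into "logs-small"
// with a single primary shard.
func shrinkLogs(ctx context.Context, client *elastic.Client) error {
	res, err := client.ShrinkIndex("logs-large", "logs-small").
		BodyJson(map[string]interface{}{
			"settings": map[string]interface{}{
				"index.number_of_shards": 1,
			},
		}).
		Do(ctx)
	if err != nil {
		return err
	}
	log.Printf("shrink acknowledged: %v", res.Acknowledged)
	return nil
}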
diff --git a/vendor/github.com/olivere/elastic/indices_shrink_test.go b/vendor/github.com/olivere/elastic/indices_shrink_test.go
deleted file mode 100644
index 06ab7d923..000000000
--- a/vendor/github.com/olivere/elastic/indices_shrink_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-func TestIndicesShrinkBuildURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Source string
- Target string
- Expected string
- }{
- {
- "my_source_index",
- "my_target_index",
- "/my_source_index/_shrink/my_target_index",
- },
- }
-
- for i, test := range tests {
- path, _, err := client.ShrinkIndex(test.Source, test.Target).buildURL()
- if err != nil {
- t.Errorf("case #%d: %v", i+1, err)
- continue
- }
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/indices_stats.go b/vendor/github.com/olivere/elastic/indices_stats.go
deleted file mode 100644
index 20d35a6d4..000000000
--- a/vendor/github.com/olivere/elastic/indices_stats.go
+++ /dev/null
@@ -1,384 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IndicesStatsService provides stats on various metrics of one or more
-// indices. See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-stats.html.
-type IndicesStatsService struct {
- client *Client
- pretty bool
- metric []string
- index []string
- level string
- types []string
- completionFields []string
- fielddataFields []string
- fields []string
- groups []string
- human *bool
-}
-
-// NewIndicesStatsService creates a new IndicesStatsService.
-func NewIndicesStatsService(client *Client) *IndicesStatsService {
- return &IndicesStatsService{
- client: client,
- index: make([]string, 0),
- metric: make([]string, 0),
- completionFields: make([]string, 0),
- fielddataFields: make([]string, 0),
- fields: make([]string, 0),
- groups: make([]string, 0),
- types: make([]string, 0),
- }
-}
-
-// Metric limits the information returned to the specific metrics. Options are:
-// docs, store, indexing, get, search, completion, fielddata, flush, merge,
-// query_cache, refresh, suggest, and warmer.
-func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService {
- s.metric = append(s.metric, metric...)
- return s
-}
-
-// Index is the list of index names; use `_all` or empty string to perform
-// the operation on all indices.
-func (s *IndicesStatsService) Index(indices ...string) *IndicesStatsService {
- s.index = append(s.index, indices...)
- return s
-}
-
-// Type is a list of document types for the `indexing` index metric.
-func (s *IndicesStatsService) Type(types ...string) *IndicesStatsService {
- s.types = append(s.types, types...)
- return s
-}
-
-// Level returns stats aggregated at cluster, index or shard level.
-func (s *IndicesStatsService) Level(level string) *IndicesStatsService {
- s.level = level
- return s
-}
-
-// CompletionFields is a list of fields for `fielddata` and `suggest`
-// index metric (supports wildcards).
-func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService {
- s.completionFields = append(s.completionFields, completionFields...)
- return s
-}
-
-// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards).
-func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService {
- s.fielddataFields = append(s.fielddataFields, fielddataFields...)
- return s
-}
-
-// Fields is a list of fields for `fielddata` and `completion` index metric
-// (supports wildcards).
-func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService {
- s.fields = append(s.fields, fields...)
- return s
-}
-
-// Groups is a list of search groups for `search` index metric.
-func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService {
- s.groups = append(s.groups, groups...)
- return s
-}
-
-// Human indicates whether to return time and byte values in human-readable format.
-func (s *IndicesStatsService) Human(human bool) *IndicesStatsService {
- s.human = &human
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesStatsService) buildURL() (string, url.Values, error) {
- var err error
- var path string
- if len(s.index) > 0 && len(s.metric) > 0 {
- path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{
- "index": strings.Join(s.index, ","),
- "metric": strings.Join(s.metric, ","),
- })
- } else if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_stats", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- } else if len(s.metric) > 0 {
- path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{
- "metric": strings.Join(s.metric, ","),
- })
- } else {
- path = "/_stats"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if len(s.groups) > 0 {
- params.Set("groups", strings.Join(s.groups, ","))
- }
- if s.human != nil {
- params.Set("human", fmt.Sprintf("%v", *s.human))
- }
- if s.level != "" {
- params.Set("level", s.level)
- }
- if len(s.types) > 0 {
- params.Set("types", strings.Join(s.types, ","))
- }
- if len(s.completionFields) > 0 {
- params.Set("completion_fields", strings.Join(s.completionFields, ","))
- }
- if len(s.fielddataFields) > 0 {
- params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
- }
- if len(s.fields) > 0 {
- params.Set("fields", strings.Join(s.fields, ","))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesStatsService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesStatsService) Do(ctx context.Context) (*IndicesStatsResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IndicesStatsResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IndicesStatsResponse is the response of IndicesStatsService.Do.
-type IndicesStatsResponse struct {
- // Shards provides information returned from shards.
- Shards shardsInfo `json:"_shards"`
-
- // All provides summary stats about all indices.
- All *IndexStats `json:"_all,omitempty"`
-
- // Indices provides a map into the stats of an index. The key of the
- // map is the index name.
- Indices map[string]*IndexStats `json:"indices,omitempty"`
-}
-
-// IndexStats is index stats for a specific index.
-type IndexStats struct {
- Primaries *IndexStatsDetails `json:"primaries,omitempty"`
- Total *IndexStatsDetails `json:"total,omitempty"`
-}
-
-type IndexStatsDetails struct {
- Docs *IndexStatsDocs `json:"docs,omitempty"`
- Store *IndexStatsStore `json:"store,omitempty"`
- Indexing *IndexStatsIndexing `json:"indexing,omitempty"`
- Get *IndexStatsGet `json:"get,omitempty"`
- Search *IndexStatsSearch `json:"search,omitempty"`
- Merges *IndexStatsMerges `json:"merges,omitempty"`
- Refresh *IndexStatsRefresh `json:"refresh,omitempty"`
- Flush *IndexStatsFlush `json:"flush,omitempty"`
- Warmer *IndexStatsWarmer `json:"warmer,omitempty"`
- FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"`
- IdCache *IndexStatsIdCache `json:"id_cache,omitempty"`
- Fielddata *IndexStatsFielddata `json:"fielddata,omitempty"`
- Percolate *IndexStatsPercolate `json:"percolate,omitempty"`
- Completion *IndexStatsCompletion `json:"completion,omitempty"`
- Segments *IndexStatsSegments `json:"segments,omitempty"`
- Translog *IndexStatsTranslog `json:"translog,omitempty"`
- Suggest *IndexStatsSuggest `json:"suggest,omitempty"`
- QueryCache *IndexStatsQueryCache `json:"query_cache,omitempty"`
-}
-
-type IndexStatsDocs struct {
- Count int64 `json:"count,omitempty"`
- Deleted int64 `json:"deleted,omitempty"`
-}
-
-type IndexStatsStore struct {
- Size string `json:"size,omitempty"` // human size, e.g. 119.3mb
- SizeInBytes int64 `json:"size_in_bytes,omitempty"`
-}
-
-type IndexStatsIndexing struct {
- IndexTotal int64 `json:"index_total,omitempty"`
- IndexTime string `json:"index_time,omitempty"`
- IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"`
- IndexCurrent int64 `json:"index_current,omitempty"`
- DeleteTotal int64 `json:"delete_total,omitempty"`
- DeleteTime string `json:"delete_time,omitempty"`
- DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"`
- DeleteCurrent int64 `json:"delete_current,omitempty"`
- NoopUpdateTotal int64 `json:"noop_update_total,omitempty"`
-}
-
-type IndexStatsGet struct {
- Total int64 `json:"total,omitempty"`
- GetTime string `json:"get_time,omitempty"`
- TimeInMillis int64 `json:"time_in_millis,omitempty"`
- ExistsTotal int64 `json:"exists_total,omitempty"`
- ExistsTime string `json:"exists_time,omitempty"`
- ExistsTimeInMillis int64 `json:"exists_time_in_millis,omitempty"`
- MissingTotal int64 `json:"missing_total,omitempty"`
- MissingTime string `json:"missing_time,omitempty"`
- MissingTimeInMillis int64 `json:"missing_time_in_millis,omitempty"`
- Current int64 `json:"current,omitempty"`
-}
-
-type IndexStatsSearch struct {
- OpenContexts int64 `json:"open_contexts,omitempty"`
- QueryTotal int64 `json:"query_total,omitempty"`
- QueryTime string `json:"query_time,omitempty"`
- QueryTimeInMillis int64 `json:"query_time_in_millis,omitempty"`
- QueryCurrent int64 `json:"query_current,omitempty"`
- FetchTotal int64 `json:"fetch_total,omitempty"`
- FetchTime string `json:"fetch_time,omitempty"`
- FetchTimeInMillis int64 `json:"fetch_time_in_millis,omitempty"`
- FetchCurrent int64 `json:"fetch_current,omitempty"`
-}
-
-type IndexStatsMerges struct {
- Current int64 `json:"current,omitempty"`
- CurrentDocs int64 `json:"current_docs,omitempty"`
- CurrentSize string `json:"current_size,omitempty"`
- CurrentSizeInBytes int64 `json:"current_size_in_bytes,omitempty"`
- Total int64 `json:"total,omitempty"`
- TotalTime string `json:"total_time,omitempty"`
- TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
- TotalDocs int64 `json:"total_docs,omitempty"`
- TotalSize string `json:"total_size,omitempty"`
- TotalSizeInBytes int64 `json:"total_size_in_bytes,omitempty"`
-}
-
-type IndexStatsRefresh struct {
- Total int64 `json:"total,omitempty"`
- TotalTime string `json:"total_time,omitempty"`
- TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
-}
-
-type IndexStatsFlush struct {
- Total int64 `json:"total,omitempty"`
- TotalTime string `json:"total_time,omitempty"`
- TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
-}
-
-type IndexStatsWarmer struct {
- Current int64 `json:"current,omitempty"`
- Total int64 `json:"total,omitempty"`
- TotalTime string `json:"total_time,omitempty"`
- TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
-}
-
-type IndexStatsFilterCache struct {
- MemorySize string `json:"memory_size,omitempty"`
- MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
- Evictions int64 `json:"evictions,omitempty"`
-}
-
-type IndexStatsIdCache struct {
- MemorySize string `json:"memory_size,omitempty"`
- MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
-}
-
-type IndexStatsFielddata struct {
- MemorySize string `json:"memory_size,omitempty"`
- MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
- Evictions int64 `json:"evictions,omitempty"`
-}
-
-type IndexStatsPercolate struct {
- Total int64 `json:"total,omitempty"`
- GetTime string `json:"get_time,omitempty"`
- TimeInMillis int64 `json:"time_in_millis,omitempty"`
- Current int64 `json:"current,omitempty"`
- MemorySize string `json:"memory_size,omitempty"`
- MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
- Queries int64 `json:"queries,omitempty"`
-}
-
-type IndexStatsCompletion struct {
- Size string `json:"size,omitempty"`
- SizeInBytes int64 `json:"size_in_bytes,omitempty"`
-}
-
-type IndexStatsSegments struct {
- Count int64 `json:"count,omitempty"`
- Memory string `json:"memory,omitempty"`
- MemoryInBytes int64 `json:"memory_in_bytes,omitempty"`
- IndexWriterMemory string `json:"index_writer_memory,omitempty"`
- IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes,omitempty"`
- IndexWriterMaxMemory string `json:"index_writer_max_memory,omitempty"`
- IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes,omitempty"`
- VersionMapMemory string `json:"version_map_memory,omitempty"`
- VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes,omitempty"`
- FixedBitSetMemory string `json:"fixed_bit_set,omitempty"`
- FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes,omitempty"`
-}
-
-type IndexStatsTranslog struct {
- Operations int64 `json:"operations,omitempty"`
- Size string `json:"size,omitempty"`
- SizeInBytes int64 `json:"size_in_bytes,omitempty"`
-}
-
-type IndexStatsSuggest struct {
- Total int64 `json:"total,omitempty"`
- Time string `json:"time,omitempty"`
- TimeInMillis int64 `json:"time_in_millis,omitempty"`
- Current int64 `json:"current,omitempty"`
-}
-
-type IndexStatsQueryCache struct {
- MemorySize string `json:"memory_size,omitempty"`
- MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
- Evictions int64 `json:"evictions,omitempty"`
- HitCount int64 `json:"hit_count,omitempty"`
- MissCount int64 `json:"miss_count,omitempty"`
-}
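
A brief sketch of the stats service deleted above (existing *elastic.Client, hypothetical index "tweets", imports as in the earlier example); it restricts the response to the docs metric, as the Metric setter documented above allows.

// printDocCount prints document counts for the hypothetical "tweets" index,
// requesting only the "docs" metric.
func printDocCount(ctx context.Context, client *elastic.Client) error {
	res, err := client.IndexStats("tweets").Metric("docs").Do(ctx)
	if err != nil {
		return err
	}
	stat, found := res.Indices["tweets"]
	if !found || stat.Total == nil || stat.Total.Docs == nil {
		return nil // no stats returned for the index
	}
	log.Printf("docs: count=%d deleted=%d", stat.Total.Docs.Count, stat.Total.Docs.Deleted)
	return nil
}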
diff --git a/vendor/github.com/olivere/elastic/indices_stats_test.go b/vendor/github.com/olivere/elastic/indices_stats_test.go
deleted file mode 100644
index a3392c97a..000000000
--- a/vendor/github.com/olivere/elastic/indices_stats_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIndexStatsBuildURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Indices []string
- Metrics []string
- Expected string
- }{
- {
- []string{},
- []string{},
- "/_stats",
- },
- {
- []string{"index1"},
- []string{},
- "/index1/_stats",
- },
- {
- []string{},
- []string{"metric1"},
- "/_stats/metric1",
- },
- {
- []string{"index1"},
- []string{"metric1"},
- "/index1/_stats/metric1",
- },
- {
- []string{"index1", "index2"},
- []string{"metric1"},
- "/index1%2Cindex2/_stats/metric1",
- },
- {
- []string{"index1", "index2"},
- []string{"metric1", "metric2"},
- "/index1%2Cindex2/_stats/metric1%2Cmetric2",
- },
- }
-
- for i, test := range tests {
- path, _, err := client.IndexStats().Index(test.Indices...).Metric(test.Metrics...).buildURL()
- if err != nil {
- t.Fatalf("case #%d: %v", i+1, err)
- }
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
-}
-
-func TestIndexStats(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- stats, err := client.IndexStats(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if stats == nil {
- t.Fatalf("expected response; got: %v", stats)
- }
- stat, found := stats.Indices[testIndexName]
- if !found {
- t.Fatalf("expected stats about index %q; got: %v", testIndexName, found)
- }
- if stat.Total == nil {
- t.Fatalf("expected total to be != nil; got: %v", stat.Total)
- }
- if stat.Total.Docs == nil {
- t.Fatalf("expected total docs to be != nil; got: %v", stat.Total.Docs)
- }
- if stat.Total.Docs.Count == 0 {
- t.Fatalf("expected total docs count to be > 0; got: %d", stat.Total.Docs.Count)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/ingest_delete_pipeline.go b/vendor/github.com/olivere/elastic/ingest_delete_pipeline.go
deleted file mode 100644
index 78b6d04f2..000000000
--- a/vendor/github.com/olivere/elastic/ingest_delete_pipeline.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IngestDeletePipelineService deletes pipelines by ID.
-// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/delete-pipeline-api.html.
-type IngestDeletePipelineService struct {
- client *Client
- pretty bool
- id string
- masterTimeout string
- timeout string
-}
-
-// NewIngestDeletePipelineService creates a new IngestDeletePipelineService.
-func NewIngestDeletePipelineService(client *Client) *IngestDeletePipelineService {
- return &IngestDeletePipelineService{
- client: client,
- }
-}
-
-// Id is documented as: Pipeline ID.
-func (s *IngestDeletePipelineService) Id(id string) *IngestDeletePipelineService {
- s.id = id
- return s
-}
-
-// MasterTimeout is documented as: Explicit operation timeout for connection to master node.
-func (s *IngestDeletePipelineService) MasterTimeout(masterTimeout string) *IngestDeletePipelineService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Timeout is documented as: Explicit operation timeout.
-func (s *IngestDeletePipelineService) Timeout(timeout string) *IngestDeletePipelineService {
- s.timeout = timeout
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IngestDeletePipelineService) Pretty(pretty bool) *IngestDeletePipelineService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IngestDeletePipelineService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{
- "id": s.id,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IngestDeletePipelineService) Validate() error {
- var invalid []string
- if s.id == "" {
- invalid = append(invalid, "Id")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IngestDeletePipelineService) Do(ctx context.Context) (*IngestDeletePipelineResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "DELETE",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IngestDeletePipelineResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IngestDeletePipelineResponse is the response of IngestDeletePipelineService.Do.
-type IngestDeletePipelineResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
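
For reference, a minimal sketch of calling the delete-pipeline service above from application code; it assumes a connected client and an existing pipeline id, and the package and function names are illustrative:

package elasticexample

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// DeletePipeline removes an ingest pipeline by id and reports whether the
// cluster acknowledged the deletion, following the Validate/Do flow above.
func DeletePipeline(ctx context.Context, client *elastic.Client, id string) error {
	res, err := client.IngestDeletePipeline(id).Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("delete of pipeline %q was not acknowledged", id)
	}
	return nil
}
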
diff --git a/vendor/github.com/olivere/elastic/ingest_delete_pipeline_test.go b/vendor/github.com/olivere/elastic/ingest_delete_pipeline_test.go
deleted file mode 100644
index 1163e0f17..000000000
--- a/vendor/github.com/olivere/elastic/ingest_delete_pipeline_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-func TestIngestDeletePipelineURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Id string
- Expected string
- }{
- {
- "my-pipeline-id",
- "/_ingest/pipeline/my-pipeline-id",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.IngestDeletePipeline(test.Id).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/ingest_get_pipeline.go b/vendor/github.com/olivere/elastic/ingest_get_pipeline.go
deleted file mode 100644
index 16a683261..000000000
--- a/vendor/github.com/olivere/elastic/ingest_get_pipeline.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IngestGetPipelineService returns pipelines based on ID.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/get-pipeline-api.html
-// for documentation.
-type IngestGetPipelineService struct {
- client *Client
- pretty bool
- id []string
- masterTimeout string
-}
-
-// NewIngestGetPipelineService creates a new IngestGetPipelineService.
-func NewIngestGetPipelineService(client *Client) *IngestGetPipelineService {
- return &IngestGetPipelineService{
- client: client,
- }
-}
-
-// Id is a list of pipeline ids. Wildcards supported.
-func (s *IngestGetPipelineService) Id(id ...string) *IngestGetPipelineService {
- s.id = append(s.id, id...)
- return s
-}
-
-// MasterTimeout is an explicit operation timeout for connection to master node.
-func (s *IngestGetPipelineService) MasterTimeout(masterTimeout string) *IngestGetPipelineService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IngestGetPipelineService) Pretty(pretty bool) *IngestGetPipelineService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IngestGetPipelineService) buildURL() (string, url.Values, error) {
- var err error
- var path string
-
- // Build URL
- if len(s.id) > 0 {
- path, err = uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{
- "id": strings.Join(s.id, ","),
- })
- } else {
- path = "/_ingest/pipeline"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IngestGetPipelineService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *IngestGetPipelineService) Do(ctx context.Context) (IngestGetPipelineResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- var ret IngestGetPipelineResponse
- if err := json.Unmarshal(res.Body, &ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IngestGetPipelineResponse is the response of IngestGetPipelineService.Do.
-type IngestGetPipelineResponse map[string]*IngestGetPipeline
-
-type IngestGetPipeline struct {
- ID string `json:"id"`
- Config map[string]interface{} `json:"config"`
-}
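
For reference, a minimal sketch of listing pipelines with the get-pipeline service above; the response is a map keyed by pipeline id. Package and function names are illustrative:

package elasticexample

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// ListPipelines prints the ids of all ingest pipelines. Passing no ids to
// IngestGetPipeline returns every pipeline; explicit ids and wildcards narrow the set.
func ListPipelines(ctx context.Context, client *elastic.Client) error {
	pipelines, err := client.IngestGetPipeline().Do(ctx)
	if err != nil {
		return err
	}
	for id := range pipelines {
		fmt.Println(id)
	}
	return nil
}
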
diff --git a/vendor/github.com/olivere/elastic/ingest_get_pipeline_test.go b/vendor/github.com/olivere/elastic/ingest_get_pipeline_test.go
deleted file mode 100644
index 009b717ca..000000000
--- a/vendor/github.com/olivere/elastic/ingest_get_pipeline_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestIngestGetPipelineURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Id []string
- Expected string
- }{
- {
- nil,
- "/_ingest/pipeline",
- },
- {
- []string{"my-pipeline-id"},
- "/_ingest/pipeline/my-pipeline-id",
- },
- {
- []string{"*"},
- "/_ingest/pipeline/%2A",
- },
- {
- []string{"pipeline-1", "pipeline-2"},
- "/_ingest/pipeline/pipeline-1%2Cpipeline-2",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.IngestGetPipeline(test.Id...).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
-
-func TestIngestLifecycle(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
-
- // With the new ES Docker images, XPack is already installed and returns a pipeline. So we cannot test for "no pipelines". Skipping for now.
- /*
- // Get all pipelines (returns 404 that indicates an error)
- getres, err := client.IngestGetPipeline().Do(context.TODO())
- if err == nil {
- t.Fatal(err)
- }
- if getres != nil {
- t.Fatalf("expected no response, got %v", getres)
- }
- //*/
-
- // Add a pipeline
- pipelineDef := `{
- "description" : "reset retweets",
- "processors" : [
- {
- "set" : {
- "field": "retweets",
- "value": 0
- }
- }
- ]
-}`
- putres, err := client.IngestPutPipeline("my-pipeline").BodyString(pipelineDef).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if putres == nil {
- t.Fatal("expected response, got nil")
- }
- if want, have := true, putres.Acknowledged; want != have {
- t.Fatalf("expected ack = %v, got %v", want, have)
- }
-
- // Get all pipelines again
- getres, err := client.IngestGetPipeline().Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if have := len(getres); have == 0 {
- t.Fatalf("expected at least 1 pipeline, got %d", have)
- }
- if _, found := getres["my-pipeline"]; !found {
-	t.Fatalf("expected to find pipeline with id %q", "my-pipeline")
- }
-
- // Get pipeline by ID
- getres, err = client.IngestGetPipeline("my-pipeline").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if want, have := 1, len(getres); want != have {
- t.Fatalf("expected %d pipelines, got %d", want, have)
- }
- if _, found := getres["my-pipeline"]; !found {
-	t.Fatalf("expected to find pipeline with id %q", "my-pipeline")
- }
-
- // Delete pipeline
- delres, err := client.IngestDeletePipeline("my-pipeline").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if delres == nil {
- t.Fatal("expected response, got nil")
- }
- if want, have := true, delres.Acknowledged; want != have {
- t.Fatalf("expected ack = %v, got %v", want, have)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/ingest_put_pipeline.go b/vendor/github.com/olivere/elastic/ingest_put_pipeline.go
deleted file mode 100644
index 5781e7072..000000000
--- a/vendor/github.com/olivere/elastic/ingest_put_pipeline.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IngestPutPipelineService adds pipelines and updates existing pipelines in
-// the cluster.
-//
-// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/put-pipeline-api.html.
-type IngestPutPipelineService struct {
- client *Client
- pretty bool
- id string
- masterTimeout string
- timeout string
- bodyJson interface{}
- bodyString string
-}
-
-// NewIngestPutPipelineService creates a new IngestPutPipelineService.
-func NewIngestPutPipelineService(client *Client) *IngestPutPipelineService {
- return &IngestPutPipelineService{
- client: client,
- }
-}
-
-// Id is the pipeline ID.
-func (s *IngestPutPipelineService) Id(id string) *IngestPutPipelineService {
- s.id = id
- return s
-}
-
-// MasterTimeout is an explicit operation timeout for connection to master node.
-func (s *IngestPutPipelineService) MasterTimeout(masterTimeout string) *IngestPutPipelineService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Timeout specifies an explicit operation timeout.
-func (s *IngestPutPipelineService) Timeout(timeout string) *IngestPutPipelineService {
- s.timeout = timeout
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IngestPutPipelineService) Pretty(pretty bool) *IngestPutPipelineService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson is the ingest definition, defined as a JSON-serializable document.
-// Use e.g. a map[string]interface{} here.
-func (s *IngestPutPipelineService) BodyJson(body interface{}) *IngestPutPipelineService {
- s.bodyJson = body
- return s
-}
-
-// BodyString is the ingest definition, specified as a string.
-func (s *IngestPutPipelineService) BodyString(body string) *IngestPutPipelineService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IngestPutPipelineService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{
- "id": s.id,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IngestPutPipelineService) Validate() error {
- var invalid []string
- if s.id == "" {
- invalid = append(invalid, "Id")
- }
- if s.bodyString == "" && s.bodyJson == nil {
- invalid = append(invalid, "BodyJson")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IngestPutPipelineService) Do(ctx context.Context) (*IngestPutPipelineResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "PUT",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IngestPutPipelineResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IngestPutPipelineResponse is the response of IngestPutPipelineService.Do.
-type IngestPutPipelineResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
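
For reference, a minimal sketch of storing a pipeline with the put-pipeline service above, reusing the "reset retweets" definition exercised in the tests; the package and function names are illustrative:

package elasticexample

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// RegisterPipeline stores an ingest pipeline that resets the "retweets"
// field to 0 and fails if the cluster does not acknowledge it.
func RegisterPipeline(ctx context.Context, client *elastic.Client) error {
	def := `{
	  "description": "reset retweets",
	  "processors": [
	    { "set": { "field": "retweets", "value": 0 } }
	  ]
	}`
	res, err := client.IngestPutPipeline("my-pipeline").BodyString(def).Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("pipeline %q was not acknowledged", "my-pipeline")
	}
	return nil
}
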
diff --git a/vendor/github.com/olivere/elastic/ingest_put_pipeline_test.go b/vendor/github.com/olivere/elastic/ingest_put_pipeline_test.go
deleted file mode 100644
index 9609f2f53..000000000
--- a/vendor/github.com/olivere/elastic/ingest_put_pipeline_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-func TestIngestPutPipelineURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Id string
- Expected string
- }{
- {
- "my-pipeline-id",
- "/_ingest/pipeline/my-pipeline-id",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.IngestPutPipeline(test.Id).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/ingest_simulate_pipeline.go b/vendor/github.com/olivere/elastic/ingest_simulate_pipeline.go
deleted file mode 100644
index 213f97bbb..000000000
--- a/vendor/github.com/olivere/elastic/ingest_simulate_pipeline.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// IngestSimulatePipelineService executes a specific pipeline against the set of
-// documents provided in the body of the request.
-//
-// The API is documented at
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/simulate-pipeline-api.html.
-type IngestSimulatePipelineService struct {
- client *Client
- pretty bool
- id string
- verbose *bool
- bodyJson interface{}
- bodyString string
-}
-
-// NewIngestSimulatePipelineService creates a new IngestSimulatePipelineService.
-func NewIngestSimulatePipelineService(client *Client) *IngestSimulatePipelineService {
- return &IngestSimulatePipelineService{
- client: client,
- }
-}
-
-// Id specifies the pipeline ID.
-func (s *IngestSimulatePipelineService) Id(id string) *IngestSimulatePipelineService {
- s.id = id
- return s
-}
-
-// Verbose mode. Display data output for each processor in the executed pipeline.
-func (s *IngestSimulatePipelineService) Verbose(verbose bool) *IngestSimulatePipelineService {
- s.verbose = &verbose
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IngestSimulatePipelineService) Pretty(pretty bool) *IngestSimulatePipelineService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson is the ingest definition, defined as a JSON-serializable simulate
-// definition. Use e.g. a map[string]interface{} here.
-func (s *IngestSimulatePipelineService) BodyJson(body interface{}) *IngestSimulatePipelineService {
- s.bodyJson = body
- return s
-}
-
-// BodyString is the simulate definition, defined as a string.
-func (s *IngestSimulatePipelineService) BodyString(body string) *IngestSimulatePipelineService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IngestSimulatePipelineService) buildURL() (string, url.Values, error) {
- var err error
- var path string
-
- // Build URL
- if s.id != "" {
- path, err = uritemplates.Expand("/_ingest/pipeline/{id}/_simulate", map[string]string{
- "id": s.id,
- })
- } else {
- path = "/_ingest/pipeline/_simulate"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.verbose != nil {
- params.Set("verbose", fmt.Sprintf("%v", *s.verbose))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IngestSimulatePipelineService) Validate() error {
- var invalid []string
- if s.bodyString == "" && s.bodyJson == nil {
- invalid = append(invalid, "BodyJson")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IngestSimulatePipelineService) Do(ctx context.Context) (*IngestSimulatePipelineResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(IngestSimulatePipelineResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// IngestSimulatePipelineResponse is the response of IngestSimulatePipelineService.Do.
-type IngestSimulatePipelineResponse struct {
- Docs []*IngestSimulateDocumentResult `json:"docs"`
-}
-
-type IngestSimulateDocumentResult struct {
- Doc map[string]interface{} `json:"doc"`
- ProcessorResults []*IngestSimulateProcessorResult `json:"processor_results"`
-}
-
-type IngestSimulateProcessorResult struct {
- ProcessorTag string `json:"tag"`
- Doc map[string]interface{} `json:"doc"`
-}
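
For reference, a minimal sketch of simulating a stored pipeline against an inline document. The body layout ({"docs": [{"_source": ...}]}) follows the Elasticsearch simulate API; the pipeline id and field values are illustrative:

package elasticexample

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// SimulatePipeline runs the stored pipeline with the given id against a
// single inline document and prints each resulting document.
func SimulatePipeline(ctx context.Context, client *elastic.Client, id string) error {
	body := map[string]interface{}{
		"docs": []interface{}{
			map[string]interface{}{
				"_source": map[string]interface{}{"retweets": 42},
			},
		},
	}
	res, err := client.IngestSimulatePipeline().Id(id).BodyJson(body).Do(ctx)
	if err != nil {
		return err
	}
	for _, doc := range res.Docs {
		fmt.Printf("%v\n", doc.Doc)
	}
	return nil
}
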
diff --git a/vendor/github.com/olivere/elastic/ingest_simulate_pipeline_test.go b/vendor/github.com/olivere/elastic/ingest_simulate_pipeline_test.go
deleted file mode 100644
index a254f85ff..000000000
--- a/vendor/github.com/olivere/elastic/ingest_simulate_pipeline_test.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-func TestIngestSimulatePipelineURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Id string
- Expected string
- }{
- {
- "",
- "/_ingest/pipeline/_simulate",
- },
- {
- "my-pipeline-id",
- "/_ingest/pipeline/my-pipeline-id/_simulate",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.IngestSimulatePipeline().Id(test.Id).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/inner_hit.go b/vendor/github.com/olivere/elastic/inner_hit.go
deleted file mode 100644
index c371fbf79..000000000
--- a/vendor/github.com/olivere/elastic/inner_hit.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// InnerHit implements a simple join for parent/child, nested, and even
-// top-level documents in Elasticsearch.
-// It is an experimental feature for Elasticsearch versions 1.5 (or greater).
-// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-inner-hits.html
-// for documentation.
-//
-// See the tests for SearchSource, HasChildFilter, HasChildQuery,
-// HasParentFilter, HasParentQuery, NestedFilter, and NestedQuery
-// for usage examples.
-type InnerHit struct {
- source *SearchSource
- path string
- typ string
-
- name string
-}
-
-// NewInnerHit creates a new InnerHit.
-func NewInnerHit() *InnerHit {
- return &InnerHit{source: NewSearchSource()}
-}
-
-func (hit *InnerHit) Path(path string) *InnerHit {
- hit.path = path
- return hit
-}
-
-func (hit *InnerHit) Type(typ string) *InnerHit {
- hit.typ = typ
- return hit
-}
-
-func (hit *InnerHit) Query(query Query) *InnerHit {
- hit.source.Query(query)
- return hit
-}
-
-func (hit *InnerHit) From(from int) *InnerHit {
- hit.source.From(from)
- return hit
-}
-
-func (hit *InnerHit) Size(size int) *InnerHit {
- hit.source.Size(size)
- return hit
-}
-
-func (hit *InnerHit) TrackScores(trackScores bool) *InnerHit {
- hit.source.TrackScores(trackScores)
- return hit
-}
-
-func (hit *InnerHit) Explain(explain bool) *InnerHit {
- hit.source.Explain(explain)
- return hit
-}
-
-func (hit *InnerHit) Version(version bool) *InnerHit {
- hit.source.Version(version)
- return hit
-}
-
-func (hit *InnerHit) StoredField(storedFieldName string) *InnerHit {
- hit.source.StoredField(storedFieldName)
- return hit
-}
-
-func (hit *InnerHit) StoredFields(storedFieldNames ...string) *InnerHit {
- hit.source.StoredFields(storedFieldNames...)
- return hit
-}
-
-func (hit *InnerHit) NoStoredFields() *InnerHit {
- hit.source.NoStoredFields()
- return hit
-}
-
-func (hit *InnerHit) FetchSource(fetchSource bool) *InnerHit {
- hit.source.FetchSource(fetchSource)
- return hit
-}
-
-func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext) *InnerHit {
- hit.source.FetchSourceContext(fetchSourceContext)
- return hit
-}
-
-func (hit *InnerHit) DocvalueFields(docvalueFields ...string) *InnerHit {
- hit.source.DocvalueFields(docvalueFields...)
- return hit
-}
-
-func (hit *InnerHit) DocvalueField(docvalueField string) *InnerHit {
- hit.source.DocvalueField(docvalueField)
- return hit
-}
-
-func (hit *InnerHit) ScriptFields(scriptFields ...*ScriptField) *InnerHit {
- hit.source.ScriptFields(scriptFields...)
- return hit
-}
-
-func (hit *InnerHit) ScriptField(scriptField *ScriptField) *InnerHit {
- hit.source.ScriptField(scriptField)
- return hit
-}
-
-func (hit *InnerHit) Sort(field string, ascending bool) *InnerHit {
- hit.source.Sort(field, ascending)
- return hit
-}
-
-func (hit *InnerHit) SortWithInfo(info SortInfo) *InnerHit {
- hit.source.SortWithInfo(info)
- return hit
-}
-
-func (hit *InnerHit) SortBy(sorter ...Sorter) *InnerHit {
- hit.source.SortBy(sorter...)
- return hit
-}
-
-func (hit *InnerHit) Highlight(highlight *Highlight) *InnerHit {
- hit.source.Highlight(highlight)
- return hit
-}
-
-func (hit *InnerHit) Highlighter() *Highlight {
- return hit.source.Highlighter()
-}
-
-func (hit *InnerHit) Name(name string) *InnerHit {
- hit.name = name
- return hit
-}
-
-func (hit *InnerHit) Source() (interface{}, error) {
- src, err := hit.source.Source()
- if err != nil {
- return nil, err
- }
- source, ok := src.(map[string]interface{})
- if !ok {
- return nil, nil
- }
-
- // Notice that hit.typ and hit.path are not exported here.
- // They are only used with SearchSource and serialized there.
-
- if hit.name != "" {
- source["name"] = hit.name
- }
- return source, nil
-}
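
For reference, a minimal sketch of building an inner hit and inspecting the JSON it produces, mirroring the Source/Marshal round trip used in the tests; in a real search the InnerHit would be attached to a nested or parent/child query:

package elasticexample

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

// PrintInnerHitSource configures a named inner hit that returns at most three
// matching nested documents and prints its serialized form.
func PrintInnerHitSource() error {
	hit := elastic.NewInnerHit().Name("comments").Size(3)
	src, err := hit.Source()
	if err != nil {
		return err
	}
	data, err := json.Marshal(src)
	if err != nil {
		return err
	}
	fmt.Println(string(data)) // e.g. {"name":"comments","size":3}
	return nil
}
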
diff --git a/vendor/github.com/olivere/elastic/inner_hit_test.go b/vendor/github.com/olivere/elastic/inner_hit_test.go
deleted file mode 100644
index fd9bd2e8a..000000000
--- a/vendor/github.com/olivere/elastic/inner_hit_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestInnerHitEmpty(t *testing.T) {
- hit := NewInnerHit()
- src, err := hit.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestInnerHitWithName(t *testing.T) {
- hit := NewInnerHit().Name("comments")
- src, err := hit.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"name":"comments"}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/logger.go b/vendor/github.com/olivere/elastic/logger.go
deleted file mode 100644
index 095eb4cd4..000000000
--- a/vendor/github.com/olivere/elastic/logger.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// Logger specifies the interface for all log operations.
-type Logger interface {
- Printf(format string, v ...interface{})
-}
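
For reference, the standard library's *log.Logger already satisfies this interface through its Printf method, so it can be plugged into the client's logging options directly; the URL and prefix below are illustrative:

package elasticexample

import (
	"log"
	"os"

	"github.com/olivere/elastic"
)

// NewTracingClient builds a client whose request/response trace is written
// to stdout via a standard *log.Logger, which implements Logger.
func NewTracingClient(url string) (*elastic.Client, error) {
	tracer := log.New(os.Stdout, "ES ", log.LstdFlags)
	return elastic.NewClient(
		elastic.SetURL(url),
		elastic.SetTraceLog(tracer),
	)
}
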
diff --git a/vendor/github.com/olivere/elastic/mget.go b/vendor/github.com/olivere/elastic/mget.go
deleted file mode 100644
index 5202a9603..000000000
--- a/vendor/github.com/olivere/elastic/mget.go
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-)
-
-// MgetService allows getting multiple documents based on an index,
-// type (optional) and id (and possibly a routing value). The response includes
-// a docs array with all the fetched documents, each element similar
-// in structure to a document provided by the Get API.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-multi-get.html
-// for details.
-type MgetService struct {
- client *Client
- pretty bool
- preference string
- realtime *bool
- refresh string
- routing string
- storedFields []string
- items []*MultiGetItem
-}
-
-// NewMgetService initializes a new Multi GET API request call.
-func NewMgetService(client *Client) *MgetService {
- builder := &MgetService{
- client: client,
- }
- return builder
-}
-
-// Preference specifies the node or shard the operation should be performed
-// on (default: random).
-func (s *MgetService) Preference(preference string) *MgetService {
- s.preference = preference
- return s
-}
-
-// Refresh the shard containing the document before performing the operation.
-func (s *MgetService) Refresh(refresh string) *MgetService {
- s.refresh = refresh
- return s
-}
-
-// Realtime specifies whether to perform the operation in realtime or search mode.
-func (s *MgetService) Realtime(realtime bool) *MgetService {
- s.realtime = &realtime
- return s
-}
-
-// Routing is the specific routing value.
-func (s *MgetService) Routing(routing string) *MgetService {
- s.routing = routing
- return s
-}
-
-// StoredFields is a list of fields to return in the response.
-func (s *MgetService) StoredFields(storedFields ...string) *MgetService {
- s.storedFields = append(s.storedFields, storedFields...)
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *MgetService) Pretty(pretty bool) *MgetService {
- s.pretty = pretty
- return s
-}
-
-// Add an item to the request.
-func (s *MgetService) Add(items ...*MultiGetItem) *MgetService {
- s.items = append(s.items, items...)
- return s
-}
-
-// Source returns the request body, which will be serialized into JSON.
-func (s *MgetService) Source() (interface{}, error) {
- source := make(map[string]interface{})
- items := make([]interface{}, len(s.items))
- for i, item := range s.items {
- src, err := item.Source()
- if err != nil {
- return nil, err
- }
- items[i] = src
- }
- source["docs"] = items
- return source, nil
-}
-
-// Do executes the request.
-func (s *MgetService) Do(ctx context.Context) (*MgetResponse, error) {
- // Build url
- path := "/_mget"
-
- params := make(url.Values)
- if s.realtime != nil {
- params.Add("realtime", fmt.Sprintf("%v", *s.realtime))
- }
- if s.preference != "" {
- params.Add("preference", s.preference)
- }
- if s.refresh != "" {
- params.Add("refresh", s.refresh)
- }
- if s.routing != "" {
- params.Set("routing", s.routing)
- }
- if len(s.storedFields) > 0 {
- params.Set("stored_fields", strings.Join(s.storedFields, ","))
- }
-
- // Set body
- body, err := s.Source()
- if err != nil {
- return nil, err
- }
-
- // Get response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return result
- ret := new(MgetResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// -- Multi Get Item --
-
-// MultiGetItem is a single document to retrieve via the MgetService.
-type MultiGetItem struct {
- index string
- typ string
- id string
- routing string
- storedFields []string
- version *int64 // see org.elasticsearch.common.lucene.uid.Versions
- versionType string // see org.elasticsearch.index.VersionType
- fsc *FetchSourceContext
-}
-
-// NewMultiGetItem initializes a new, single item for a Multi GET request.
-func NewMultiGetItem() *MultiGetItem {
- return &MultiGetItem{}
-}
-
-// Index specifies the index name.
-func (item *MultiGetItem) Index(index string) *MultiGetItem {
- item.index = index
- return item
-}
-
-// Type specifies the type name.
-func (item *MultiGetItem) Type(typ string) *MultiGetItem {
- item.typ = typ
- return item
-}
-
-// Id specifies the identifier of the document.
-func (item *MultiGetItem) Id(id string) *MultiGetItem {
- item.id = id
- return item
-}
-
-// Routing is the specific routing value.
-func (item *MultiGetItem) Routing(routing string) *MultiGetItem {
- item.routing = routing
- return item
-}
-
-// StoredFields is a list of fields to return in the response.
-func (item *MultiGetItem) StoredFields(storedFields ...string) *MultiGetItem {
- item.storedFields = append(item.storedFields, storedFields...)
- return item
-}
-
-// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1),
-// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions.
-// The default in Elasticsearch is MatchAny (-3).
-func (item *MultiGetItem) Version(version int64) *MultiGetItem {
- item.version = &version
- return item
-}
-
-// VersionType can be "internal", "external", "external_gt", or "external_gte".
-// See org.elasticsearch.index.VersionType in Elasticsearch source.
-// It is "internal" by default.
-func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem {
- item.versionType = versionType
- return item
-}
-
-// FetchSource allows specifying source filtering.
-func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem {
- item.fsc = fetchSourceContext
- return item
-}
-
-// Source returns the serialized JSON to be sent to Elasticsearch as
-// part of a MultiGet search.
-func (item *MultiGetItem) Source() (interface{}, error) {
- source := make(map[string]interface{})
-
- source["_id"] = item.id
-
- if item.index != "" {
- source["_index"] = item.index
- }
- if item.typ != "" {
- source["_type"] = item.typ
- }
- if item.fsc != nil {
- src, err := item.fsc.Source()
- if err != nil {
- return nil, err
- }
- source["_source"] = src
- }
- if item.routing != "" {
- source["_routing"] = item.routing
- }
- if len(item.storedFields) > 0 {
- source["stored_fields"] = strings.Join(item.storedFields, ",")
- }
- if item.version != nil {
- source["version"] = fmt.Sprintf("%d", *item.version)
- }
- if item.versionType != "" {
- source["version_type"] = item.versionType
- }
-
- return source, nil
-}
-
-// -- Result of a Multi Get request.
-
-// MgetResponse is the outcome of a Multi GET API request.
-type MgetResponse struct {
- Docs []*GetResult `json:"docs,omitempty"`
-}
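
For reference, a minimal sketch of fetching two documents in one round trip with the multi-get service above; the index, type and tweet struct mirror the test fixtures, and the ids are illustrative:

package elasticexample

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

// tweet matches the document shape used throughout the tests.
type tweet struct {
	User    string `json:"user"`
	Message string `json:"message"`
}

// FetchTweets retrieves documents 1 and 3 from the given index in a single
// _mget request and prints the decoded messages.
func FetchTweets(ctx context.Context, client *elastic.Client, index string) error {
	res, err := client.MultiGet().
		Add(elastic.NewMultiGetItem().Index(index).Type("doc").Id("1")).
		Add(elastic.NewMultiGetItem().Index(index).Type("doc").Id("3")).
		Do(ctx)
	if err != nil {
		return err
	}
	for _, doc := range res.Docs {
		if doc.Error != nil || doc.Source == nil {
			continue // per-item error or missing document
		}
		var t tweet
		if err := json.Unmarshal(*doc.Source, &t); err != nil {
			return err
		}
		fmt.Println(t.User, t.Message)
	}
	return nil
}
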
diff --git a/vendor/github.com/olivere/elastic/mget_test.go b/vendor/github.com/olivere/elastic/mget_test.go
deleted file mode 100644
index 6b3ecd9f6..000000000
--- a/vendor/github.com/olivere/elastic/mget_test.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "testing"
-)
-
-func TestMultiGet(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add some documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Count documents
- count, err := client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != 3 {
- t.Errorf("expected Count = %d; got %d", 3, count)
- }
-
- // Get documents 1 and 3
- res, err := client.MultiGet().
- Add(NewMultiGetItem().Index(testIndexName).Type("doc").Id("1")).
- Add(NewMultiGetItem().Index(testIndexName).Type("doc").Id("3")).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected result to be != nil; got nil")
- }
- if res.Docs == nil {
- t.Fatal("expected result docs to be != nil; got nil")
- }
- if len(res.Docs) != 2 {
- t.Fatalf("expected to have 2 docs; got %d", len(res.Docs))
- }
-
- item := res.Docs[0]
- if item.Error != nil {
- t.Errorf("expected no error on item 0; got %v", item.Error)
- }
- if item.Source == nil {
- t.Errorf("expected Source != nil; got %v", item.Source)
- }
- var doc tweet
- if err := json.Unmarshal(*item.Source, &doc); err != nil {
- t.Fatalf("expected to unmarshal item Source; got %v", err)
- }
- if doc.Message != tweet1.Message {
- t.Errorf("expected Message of first tweet to be %q; got %q", tweet1.Message, doc.Message)
- }
-
- item = res.Docs[1]
- if item.Error != nil {
- t.Errorf("expected no error on item 1; got %v", item.Error)
- }
- if item.Source == nil {
- t.Errorf("expected Source != nil; got %v", item.Source)
- }
- if err := json.Unmarshal(*item.Source, &doc); err != nil {
- t.Fatalf("expected to unmarshal item Source; got %v", err)
- }
- if doc.Message != tweet3.Message {
- t.Errorf("expected Message of second tweet to be %q; got %q", tweet3.Message, doc.Message)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/msearch.go b/vendor/github.com/olivere/elastic/msearch.go
deleted file mode 100644
index c1a589a97..000000000
--- a/vendor/github.com/olivere/elastic/msearch.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
- "strings"
-)
-
-// MultiSearchService executes one or more searches in one roundtrip.
-type MultiSearchService struct {
- client *Client
- requests []*SearchRequest
- indices []string
- pretty bool
- maxConcurrentRequests *int
- preFilterShardSize *int
-}
-
-func NewMultiSearchService(client *Client) *MultiSearchService {
- builder := &MultiSearchService{
- client: client,
- }
- return builder
-}
-
-func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService {
- s.requests = append(s.requests, requests...)
- return s
-}
-
-func (s *MultiSearchService) Index(indices ...string) *MultiSearchService {
- s.indices = append(s.indices, indices...)
- return s
-}
-
-func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService {
- s.pretty = pretty
- return s
-}
-
-func (s *MultiSearchService) MaxConcurrentSearches(max int) *MultiSearchService {
- s.maxConcurrentRequests = &max
- return s
-}
-
-func (s *MultiSearchService) PreFilterShardSize(size int) *MultiSearchService {
- s.preFilterShardSize = &size
- return s
-}
-
-func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) {
- // Build url
- path := "/_msearch"
-
- // Parameters
- params := make(url.Values)
- if s.pretty {
- params.Set("pretty", fmt.Sprintf("%v", s.pretty))
- }
- if v := s.maxConcurrentRequests; v != nil {
- params.Set("max_concurrent_searches", fmt.Sprintf("%v", *v))
- }
- if v := s.preFilterShardSize; v != nil {
- params.Set("pre_filter_shard_size", fmt.Sprintf("%v", *v))
- }
-
- // Set body
- var lines []string
- for _, sr := range s.requests {
- // Set default indices if not specified in the request
- if !sr.HasIndices() && len(s.indices) > 0 {
- sr = sr.Index(s.indices...)
- }
-
- header, err := json.Marshal(sr.header())
- if err != nil {
- return nil, err
- }
- body, err := sr.Body()
- if err != nil {
- return nil, err
- }
- lines = append(lines, string(header))
- lines = append(lines, body)
- }
- body := strings.Join(lines, "\n") + "\n" // add trailing \n
-
- // Get response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return result
- ret := new(MultiSearchResult)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// MultiSearchResult is the outcome of running a multi-search operation.
-type MultiSearchResult struct {
- Responses []*SearchResult `json:"responses,omitempty"`
-}
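
For reference, a minimal sketch of sending two searches in one _msearch round trip, mirroring the match_all and term queries used in the tests; the index name and tag value are illustrative:

package elasticexample

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// SearchAllAndGolang runs a match_all query and a term query on "tags" in a
// single request and prints the hit count of each response.
func SearchAllAndGolang(ctx context.Context, client *elastic.Client, index string) error {
	all := elastic.NewSearchRequest().Index(index).
		Source(elastic.NewSearchSource().Query(elastic.NewMatchAllQuery()).Size(10))
	golang := elastic.NewSearchRequest().Index(index).
		Source(elastic.NewSearchSource().Query(elastic.NewTermQuery("tags", "golang")))

	res, err := client.MultiSearch().Add(all, golang).Do(ctx)
	if err != nil {
		return err
	}
	for i, r := range res.Responses {
		if r.Hits == nil {
			continue
		}
		fmt.Printf("query %d: %d hits\n", i, r.Hits.TotalHits)
	}
	return nil
}
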
diff --git a/vendor/github.com/olivere/elastic/msearch_test.go b/vendor/github.com/olivere/elastic/msearch_test.go
deleted file mode 100644
index d25e2cc28..000000000
--- a/vendor/github.com/olivere/elastic/msearch_test.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- _ "net/http"
- "testing"
-)
-
-func TestMultiSearch(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
- // client := setupTestClientAndCreateIndexAndLog(t)
-
- tweet1 := tweet{
- User: "olivere",
- Message: "Welcome to Golang and Elasticsearch.",
- Tags: []string{"golang", "elasticsearch"},
- }
- tweet2 := tweet{
- User: "olivere",
- Message: "Another unrelated topic.",
- Tags: []string{"golang"},
- }
- tweet3 := tweet{
- User: "sandrae",
- Message: "Cycling is fun.",
- Tags: []string{"sports", "cycling"},
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Spawn two search queries with one roundtrip
- q1 := NewMatchAllQuery()
- q2 := NewTermQuery("tags", "golang")
-
- sreq1 := NewSearchRequest().Index(testIndexName, testIndexName2).
- Source(NewSearchSource().Query(q1).Size(10))
- sreq2 := NewSearchRequest().Index(testIndexName).Type("doc").
- Source(NewSearchSource().Query(q2))
-
- searchResult, err := client.MultiSearch().
- Add(sreq1, sreq2).
- Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Responses == nil {
- t.Fatal("expected responses != nil; got nil")
- }
- if len(searchResult.Responses) != 2 {
- t.Fatalf("expected 2 responses; got %d", len(searchResult.Responses))
- }
-
- sres := searchResult.Responses[0]
- if sres.Hits == nil {
- t.Errorf("expected Hits != nil; got nil")
- }
- if sres.Hits.TotalHits != 3 {
- t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
- }
- if len(sres.Hits.Hits) != 3 {
- t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
- }
- for _, hit := range sres.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- }
-
- sres = searchResult.Responses[1]
- if sres.Hits == nil {
- t.Errorf("expected Hits != nil; got nil")
- }
- if sres.Hits.TotalHits != 2 {
- t.Errorf("expected Hits.TotalHits = %d; got %d", 2, sres.Hits.TotalHits)
- }
- if len(sres.Hits.Hits) != 2 {
- t.Errorf("expected len(Hits.Hits) = %d; got %d", 2, len(sres.Hits.Hits))
- }
- for _, hit := range sres.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestMultiSearchWithStrings(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
- // client := setupTestClientAndCreateIndexAndLog(t)
-
- tweet1 := tweet{
- User: "olivere",
- Message: "Welcome to Golang and Elasticsearch.",
- Tags: []string{"golang", "elasticsearch"},
- }
- tweet2 := tweet{
- User: "olivere",
- Message: "Another unrelated topic.",
- Tags: []string{"golang"},
- }
- tweet3 := tweet{
- User: "sandrae",
- Message: "Cycling is fun.",
- Tags: []string{"sports", "cycling"},
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Spawn two search queries with one roundtrip
- sreq1 := NewSearchRequest().Index(testIndexName, testIndexName2).
- Source(`{"query":{"match_all":{}}}`)
- sreq2 := NewSearchRequest().Index(testIndexName).Type("doc").
- Source(`{"query":{"term":{"tags":"golang"}}}`)
-
- searchResult, err := client.MultiSearch().
- Add(sreq1, sreq2).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Responses == nil {
- t.Fatal("expected responses != nil; got nil")
- }
- if len(searchResult.Responses) != 2 {
- t.Fatalf("expected 2 responses; got %d", len(searchResult.Responses))
- }
-
- sres := searchResult.Responses[0]
- if sres.Hits == nil {
- t.Errorf("expected Hits != nil; got nil")
- }
- if sres.Hits.TotalHits != 3 {
- t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
- }
- if len(sres.Hits.Hits) != 3 {
- t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
- }
- for _, hit := range sres.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- }
-
- sres = searchResult.Responses[1]
- if sres.Hits == nil {
- t.Errorf("expected Hits != nil; got nil")
- }
- if sres.Hits.TotalHits != 2 {
- t.Errorf("expected Hits.TotalHits = %d; got %d", 2, sres.Hits.TotalHits)
- }
- if len(sres.Hits.Hits) != 2 {
- t.Errorf("expected len(Hits.Hits) = %d; got %d", 2, len(sres.Hits.Hits))
- }
- for _, hit := range sres.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestMultiSearchWithOneRequest(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{
- User: "olivere",
- Message: "Welcome to Golang and Elasticsearch.",
- Tags: []string{"golang", "elasticsearch"},
- }
- tweet2 := tweet{
- User: "olivere",
- Message: "Another unrelated topic.",
- Tags: []string{"golang"},
- }
- tweet3 := tweet{
- User: "sandrae",
- Message: "Cycling is fun.",
- Tags: []string{"sports", "cycling"},
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Spawn two search queries with one roundtrip
- query := NewMatchAllQuery()
- source := NewSearchSource().Query(query).Size(10)
- sreq := NewSearchRequest().Source(source)
-
- searchResult, err := client.MultiSearch().
- Index(testIndexName).
- Add(sreq).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Responses == nil {
- t.Fatal("expected responses != nil; got nil")
- }
- if len(searchResult.Responses) != 1 {
- t.Fatalf("expected 1 responses; got %d", len(searchResult.Responses))
- }
-
- sres := searchResult.Responses[0]
- if sres.Hits == nil {
- t.Errorf("expected Hits != nil; got nil")
- }
- if sres.Hits.TotalHits != 3 {
- t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
- }
- if len(sres.Hits.Hits) != 3 {
- t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
- }
- for _, hit := range sres.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/mtermvectors.go b/vendor/github.com/olivere/elastic/mtermvectors.go
deleted file mode 100644
index 755718e67..000000000
--- a/vendor/github.com/olivere/elastic/mtermvectors.go
+++ /dev/null
@@ -1,475 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// MultiTermvectorService returns information and statistics on terms in the
-// fields of a particular document. The document could be stored in the
-// index or artificially provided by the user.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-multi-termvectors.html
-// for documentation.
-type MultiTermvectorService struct {
- client *Client
- pretty bool
- index string
- typ string
- fieldStatistics *bool
- fields []string
- ids []string
- offsets *bool
- parent string
- payloads *bool
- positions *bool
- preference string
- realtime *bool
- routing string
- termStatistics *bool
- version interface{}
- versionType string
- bodyJson interface{}
- bodyString string
- docs []*MultiTermvectorItem
-}
-
-// NewMultiTermvectorService creates a new MultiTermvectorService.
-func NewMultiTermvectorService(client *Client) *MultiTermvectorService {
- return &MultiTermvectorService{
- client: client,
- }
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *MultiTermvectorService) Pretty(pretty bool) *MultiTermvectorService {
- s.pretty = pretty
- return s
-}
-
-// Add adds documents to the MultiTermvectorService.
-func (s *MultiTermvectorService) Add(docs ...*MultiTermvectorItem) *MultiTermvectorService {
- s.docs = append(s.docs, docs...)
- return s
-}
-
-// Index in which the document resides.
-func (s *MultiTermvectorService) Index(index string) *MultiTermvectorService {
- s.index = index
- return s
-}
-
-// Type of the document.
-func (s *MultiTermvectorService) Type(typ string) *MultiTermvectorService {
- s.typ = typ
- return s
-}
-
-// FieldStatistics specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
-func (s *MultiTermvectorService) FieldStatistics(fieldStatistics bool) *MultiTermvectorService {
- s.fieldStatistics = &fieldStatistics
- return s
-}
-
-// Fields is a comma-separated list of fields to return. Applies to all returned documents unless otherwise specified in body "params" or "docs".
-func (s *MultiTermvectorService) Fields(fields []string) *MultiTermvectorService {
- s.fields = fields
- return s
-}
-
-// Ids is a comma-separated list of document ids. You must define ids as parameter or set "ids" or "docs" in the request body.
-func (s *MultiTermvectorService) Ids(ids []string) *MultiTermvectorService {
- s.ids = ids
- return s
-}
-
-// Offsets specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
-func (s *MultiTermvectorService) Offsets(offsets bool) *MultiTermvectorService {
- s.offsets = &offsets
- return s
-}
-
-// Parent id of documents. Applies to all returned documents unless otherwise specified in body "params" or "docs".
-func (s *MultiTermvectorService) Parent(parent string) *MultiTermvectorService {
- s.parent = parent
- return s
-}
-
-// Payloads specifies if term payloads should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
-func (s *MultiTermvectorService) Payloads(payloads bool) *MultiTermvectorService {
- s.payloads = &payloads
- return s
-}
-
-// Positions specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
-func (s *MultiTermvectorService) Positions(positions bool) *MultiTermvectorService {
- s.positions = &positions
- return s
-}
-
-// Preference specifies the node or shard the operation should be performed on (default: random). Applies to all returned documents unless otherwise specified in body "params" or "docs".
-func (s *MultiTermvectorService) Preference(preference string) *MultiTermvectorService {
- s.preference = preference
- return s
-}
-
-// Realtime specifies if requests are real-time as opposed to near-real-time (default: true).
-func (s *MultiTermvectorService) Realtime(realtime bool) *MultiTermvectorService {
- s.realtime = &realtime
- return s
-}
-
-// Routing specific routing value. Applies to all returned documents unless otherwise specified in body "params" or "docs".
-func (s *MultiTermvectorService) Routing(routing string) *MultiTermvectorService {
- s.routing = routing
- return s
-}
-
-// TermStatistics specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
-func (s *MultiTermvectorService) TermStatistics(termStatistics bool) *MultiTermvectorService {
- s.termStatistics = &termStatistics
- return s
-}
-
-// Version is explicit version number for concurrency control.
-func (s *MultiTermvectorService) Version(version interface{}) *MultiTermvectorService {
- s.version = version
- return s
-}
-
-// VersionType is specific version type.
-func (s *MultiTermvectorService) VersionType(versionType string) *MultiTermvectorService {
- s.versionType = versionType
- return s
-}
-
-// BodyJson is documented as: Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.
-func (s *MultiTermvectorService) BodyJson(body interface{}) *MultiTermvectorService {
- s.bodyJson = body
- return s
-}
-
-// BodyString is documented as: Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.
-func (s *MultiTermvectorService) BodyString(body string) *MultiTermvectorService {
- s.bodyString = body
- return s
-}
-
-func (s *MultiTermvectorService) Source() interface{} {
- source := make(map[string]interface{})
- docs := make([]interface{}, len(s.docs))
- for i, doc := range s.docs {
- docs[i] = doc.Source()
- }
- source["docs"] = docs
- return source
-}
-
-// buildURL builds the URL for the operation.
-func (s *MultiTermvectorService) buildURL() (string, url.Values, error) {
- var path string
- var err error
-
- if s.index != "" && s.typ != "" {
- path, err = uritemplates.Expand("/{index}/{type}/_mtermvectors", map[string]string{
- "index": s.index,
- "type": s.typ,
- })
- } else if s.index != "" && s.typ == "" {
- path, err = uritemplates.Expand("/{index}/_mtermvectors", map[string]string{
- "index": s.index,
- })
- } else {
- path = "/_mtermvectors"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.fieldStatistics != nil {
- params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics))
- }
- if len(s.fields) > 0 {
- params.Set("fields", strings.Join(s.fields, ","))
- }
- if len(s.ids) > 0 {
- params.Set("ids", strings.Join(s.ids, ","))
- }
- if s.offsets != nil {
- params.Set("offsets", fmt.Sprintf("%v", *s.offsets))
- }
- if s.parent != "" {
- params.Set("parent", s.parent)
- }
- if s.payloads != nil {
- params.Set("payloads", fmt.Sprintf("%v", *s.payloads))
- }
- if s.positions != nil {
- params.Set("positions", fmt.Sprintf("%v", *s.positions))
- }
- if s.preference != "" {
- params.Set("preference", s.preference)
- }
- if s.realtime != nil {
- params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
- }
- if s.routing != "" {
- params.Set("routing", s.routing)
- }
- if s.termStatistics != nil {
- params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics))
- }
- if s.version != nil {
- params.Set("version", fmt.Sprintf("%v", s.version))
- }
- if s.versionType != "" {
- params.Set("version_type", s.versionType)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *MultiTermvectorService) Validate() error {
- var invalid []string
- if s.index == "" && s.typ != "" {
- invalid = append(invalid, "Index")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *MultiTermvectorService) Do(ctx context.Context) (*MultiTermvectorResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else if len(s.bodyString) > 0 {
- body = s.bodyString
- } else {
- body = s.Source()
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(MultiTermvectorResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// MultiTermvectorResponse is the response of MultiTermvectorService.Do.
-type MultiTermvectorResponse struct {
- Docs []*TermvectorsResponse `json:"docs"`
-}
-
-// -- MultiTermvectorItem --
-
-// MultiTermvectorItem is a single document to retrieve via MultiTermvectorService.
-type MultiTermvectorItem struct {
- index string
- typ string
- id string
- doc interface{}
- fieldStatistics *bool
- fields []string
- perFieldAnalyzer map[string]string
- offsets *bool
- parent string
- payloads *bool
- positions *bool
- preference string
- realtime *bool
- routing string
- termStatistics *bool
-}
-
-func NewMultiTermvectorItem() *MultiTermvectorItem {
- return &MultiTermvectorItem{}
-}
-
-func (s *MultiTermvectorItem) Index(index string) *MultiTermvectorItem {
- s.index = index
- return s
-}
-
-func (s *MultiTermvectorItem) Type(typ string) *MultiTermvectorItem {
- s.typ = typ
- return s
-}
-
-func (s *MultiTermvectorItem) Id(id string) *MultiTermvectorItem {
- s.id = id
- return s
-}
-
-// Doc is the document to analyze.
-func (s *MultiTermvectorItem) Doc(doc interface{}) *MultiTermvectorItem {
- s.doc = doc
- return s
-}
-
-// FieldStatistics specifies if document count, sum of document frequencies
-// and sum of total term frequencies should be returned.
-func (s *MultiTermvectorItem) FieldStatistics(fieldStatistics bool) *MultiTermvectorItem {
- s.fieldStatistics = &fieldStatistics
- return s
-}
-
-// Fields is a list of fields to return.
-func (s *MultiTermvectorItem) Fields(fields ...string) *MultiTermvectorItem {
- if s.fields == nil {
- s.fields = make([]string, 0)
- }
- s.fields = append(s.fields, fields...)
- return s
-}
-
-// PerFieldAnalyzer allows specifying a different analyzer than the one
-// configured at the field.
-func (s *MultiTermvectorItem) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *MultiTermvectorItem {
- s.perFieldAnalyzer = perFieldAnalyzer
- return s
-}
-
-// Offsets specifies if term offsets should be returned.
-func (s *MultiTermvectorItem) Offsets(offsets bool) *MultiTermvectorItem {
- s.offsets = &offsets
- return s
-}
-
-// Parent is the parent id of the documents.
-func (s *MultiTermvectorItem) Parent(parent string) *MultiTermvectorItem {
- s.parent = parent
- return s
-}
-
-// Payloads specifies if term payloads should be returned.
-func (s *MultiTermvectorItem) Payloads(payloads bool) *MultiTermvectorItem {
- s.payloads = &payloads
- return s
-}
-
-// Positions specifies if term positions should be returned.
-func (s *MultiTermvectorItem) Positions(positions bool) *MultiTermvectorItem {
- s.positions = &positions
- return s
-}
-
-// Preference specify the node or shard the operation
-// should be performed on (default: random).
-func (s *MultiTermvectorItem) Preference(preference string) *MultiTermvectorItem {
- s.preference = preference
- return s
-}
-
-// Realtime specifies if request is real-time as opposed to
-// near-real-time (default: true).
-func (s *MultiTermvectorItem) Realtime(realtime bool) *MultiTermvectorItem {
- s.realtime = &realtime
- return s
-}
-
-// Routing is a specific routing value.
-func (s *MultiTermvectorItem) Routing(routing string) *MultiTermvectorItem {
- s.routing = routing
- return s
-}
-
-// TermStatistics specifies if total term frequency and document frequency
-// should be returned.
-func (s *MultiTermvectorItem) TermStatistics(termStatistics bool) *MultiTermvectorItem {
- s.termStatistics = &termStatistics
- return s
-}
-
-// Source returns the serialized JSON to be sent to Elasticsearch as
-// part of a MultiTermvector.
-func (s *MultiTermvectorItem) Source() interface{} {
- source := make(map[string]interface{})
-
- source["_id"] = s.id
-
- if s.index != "" {
- source["_index"] = s.index
- }
- if s.typ != "" {
- source["_type"] = s.typ
- }
- if s.fields != nil {
- source["fields"] = s.fields
- }
- if s.fieldStatistics != nil {
- source["field_statistics"] = fmt.Sprintf("%v", *s.fieldStatistics)
- }
- if s.offsets != nil {
- source["offsets"] = s.offsets
- }
- if s.parent != "" {
- source["parent"] = s.parent
- }
- if s.payloads != nil {
- source["payloads"] = fmt.Sprintf("%v", *s.payloads)
- }
- if s.positions != nil {
- source["positions"] = fmt.Sprintf("%v", *s.positions)
- }
- if s.preference != "" {
- source["preference"] = s.preference
- }
- if s.realtime != nil {
- source["realtime"] = fmt.Sprintf("%v", *s.realtime)
- }
- if s.routing != "" {
- source["routing"] = s.routing
- }
- if s.termStatistics != nil {
- source["term_statistics"] = fmt.Sprintf("%v", *s.termStatistics)
- }
- if s.doc != nil {
- source["doc"] = s.doc
- }
- if s.perFieldAnalyzer != nil && len(s.perFieldAnalyzer) > 0 {
- source["per_field_analyzer"] = s.perFieldAnalyzer
- }
-
- return source
-}
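For reference, a minimal sketch of how the multi term vectors service deleted above was typically driven from client code. The index name, type, document ids, and the field requested are illustrative assumptions, not taken from this diff; an Elasticsearch 6.x cluster on the default local address is assumed.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}
	// Request term vectors for two documents of a hypothetical "tweets" index.
	res, err := client.MultiTermVectors().
		Index("tweets").
		Type("doc").
		Add(elastic.NewMultiTermvectorItem().Index("tweets").Type("doc").Id("1").Fields("Message")).
		Add(elastic.NewMultiTermvectorItem().Index("tweets").Type("doc").Id("2").Fields("Message")).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, doc := range res.Docs {
		fmt.Printf("doc %s: found=%v\n", doc.Id, doc.Found)
	}
}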
diff --git a/vendor/github.com/olivere/elastic/mtermvectors_test.go b/vendor/github.com/olivere/elastic/mtermvectors_test.go
deleted file mode 100644
index 5f90cd5e2..000000000
--- a/vendor/github.com/olivere/elastic/mtermvectors_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestMultiTermVectorsValidateAndBuildURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Index string
- Type string
- Expected string
- ExpectValidateFailure bool
- }{
- // #0: No index, no type
- {
- "",
- "",
- "/_mtermvectors",
- false,
- },
- // #1: Index only
- {
- "twitter",
- "",
- "/twitter/_mtermvectors",
- false,
- },
- // #2: Type without index
- {
- "",
- "doc",
- "",
- true,
- },
- // #3: Both index and type
- {
- "twitter",
- "doc",
- "/twitter/doc/_mtermvectors",
- false,
- },
- }
-
- for i, test := range tests {
- builder := client.MultiTermVectors().Index(test.Index).Type(test.Type)
- // Validate
- err := builder.Validate()
- if err != nil {
- if !test.ExpectValidateFailure {
- t.Errorf("#%d: expected no error, got: %v", i, err)
- continue
- }
- } else {
- if test.ExpectValidateFailure {
- t.Errorf("#%d: expected error, got: nil", i)
- continue
- }
- // Build
- path, _, err := builder.buildURL()
- if err != nil {
- t.Errorf("#%d: expected no error, got: %v", i, err)
- continue
- }
- if path != test.Expected {
- t.Errorf("#%d: expected %q; got: %q", i, test.Expected, path)
- }
- }
- }
-}
-
-func TestMultiTermVectorsWithIds(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Count documents
- count, err := client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if count != 3 {
- t.Errorf("expected Count = %d; got %d", 3, count)
- }
-
- // MultiTermVectors by specifying ID by 1 and 3
- field := "Message"
- res, err := client.MultiTermVectors().
- Index(testIndexName).
- Type("doc").
- Add(NewMultiTermvectorItem().Index(testIndexName).Type("doc").Id("1").Fields(field)).
- Add(NewMultiTermvectorItem().Index(testIndexName).Type("doc").Id("3").Fields(field)).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected to return information and statistics")
- }
- if res.Docs == nil {
- t.Fatal("expected result docs to be != nil; got nil")
- }
- if len(res.Docs) != 2 {
- t.Fatalf("expected to have 2 docs; got %d", len(res.Docs))
- }
-}
diff --git a/vendor/github.com/olivere/elastic/nodes_info.go b/vendor/github.com/olivere/elastic/nodes_info.go
deleted file mode 100644
index 9f1422a69..000000000
--- a/vendor/github.com/olivere/elastic/nodes_info.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
- "time"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// NodesInfoService retrieves information about one, several, or all of
-// the cluster's nodes.
-// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/cluster-nodes-info.html.
-type NodesInfoService struct {
- client *Client
- pretty bool
- nodeId []string
- metric []string
- flatSettings *bool
- human *bool
-}
-
-// NewNodesInfoService creates a new NodesInfoService.
-func NewNodesInfoService(client *Client) *NodesInfoService {
- return &NodesInfoService{
- client: client,
- nodeId: []string{"_all"},
- metric: []string{"_all"},
- }
-}
-
-// NodeId is a list of node IDs or names to limit the returned information.
-// Use "_local" to return information from the node you're connecting to,
-// leave empty to get information from all nodes.
-func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService {
- s.nodeId = append(s.nodeId, nodeId...)
- return s
-}
-
-// Metric is a list of metrics you wish returned. Leave empty to return all.
-// Valid metrics are: settings, os, process, jvm, thread_pool, network,
-// transport, http, and plugins.
-func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService {
- s.metric = append(s.metric, metric...)
- return s
-}
-
-// FlatSettings returns settings in flat format (default: false).
-func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService {
- s.flatSettings = &flatSettings
- return s
-}
-
-// Human indicates whether to return time and byte values in human-readable format.
-func (s *NodesInfoService) Human(human bool) *NodesInfoService {
- s.human = &human
- return s
-}
-
-// Pretty indicates whether to indent the returned JSON.
-func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *NodesInfoService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{
- "node_id": strings.Join(s.nodeId, ","),
- "metric": strings.Join(s.metric, ","),
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.flatSettings != nil {
- params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
- }
- if s.human != nil {
- params.Set("human", fmt.Sprintf("%v", *s.human))
- }
- if s.pretty {
- params.Set("pretty", "true")
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *NodesInfoService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *NodesInfoService) Do(ctx context.Context) (*NodesInfoResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(NodesInfoResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// NodesInfoResponse is the response of NodesInfoService.Do.
-type NodesInfoResponse struct {
- ClusterName string `json:"cluster_name"`
- Nodes map[string]*NodesInfoNode `json:"nodes"`
-}
-
-type NodesInfoNode struct {
- // Name of the node, e.g. "Mister Fear"
- Name string `json:"name"`
- // TransportAddress, e.g. "127.0.0.1:9300"
- TransportAddress string `json:"transport_address"`
- // Host is the host name, e.g. "macbookair"
- Host string `json:"host"`
- // IP is the IP address, e.g. "192.168.1.2"
- IP string `json:"ip"`
- // Version is the Elasticsearch version running on the node, e.g. "1.4.3"
- Version string `json:"version"`
- // Build is the Elasticsearch build, e.g. "36a29a7"
- Build string `json:"build"`
- // HTTPAddress, e.g. "127.0.0.1:9200"
- HTTPAddress string `json:"http_address"`
- // HTTPSAddress, e.g. "127.0.0.1:9200"
- HTTPSAddress string `json:"https_address"`
-
- // Attributes of the node.
- Attributes map[string]interface{} `json:"attributes"`
-
- // Settings of the node, e.g. paths and pidfile.
- Settings map[string]interface{} `json:"settings"`
-
- // OS information, e.g. CPU and memory.
- OS *NodesInfoNodeOS `json:"os"`
-
- // Process information, e.g. max file descriptors.
- Process *NodesInfoNodeProcess `json:"process"`
-
- // JVM information, e.g. VM version.
- JVM *NodesInfoNodeJVM `json:"jvm"`
-
- // ThreadPool information.
- ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"`
-
- // Network information.
- Network *NodesInfoNodeNetwork `json:"network"`
-
- // Transport information.
- Transport *NodesInfoNodeTransport `json:"transport"`
-
- // HTTP information.
- HTTP *NodesInfoNodeHTTP `json:"http"`
-
- // Plugins information.
- Plugins []*NodesInfoNodePlugin `json:"plugins"`
-}
-
-type NodesInfoNodeOS struct {
- RefreshInterval string `json:"refresh_interval"` // e.g. 1s
- RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
- AvailableProcessors int `json:"available_processors"` // e.g. 4
-
- // CPU information
- CPU struct {
- Vendor string `json:"vendor"` // e.g. Intel
- Model string `json:"model"` // e.g. iMac15,1
- MHz int `json:"mhz"` // e.g. 3500
- TotalCores int `json:"total_cores"` // e.g. 4
- TotalSockets int `json:"total_sockets"` // e.g. 4
- CoresPerSocket int `json:"cores_per_socket"` // e.g. 16
- CacheSizeInBytes int `json:"cache_size_in_bytes"` // e.g. 256
- } `json:"cpu"`
-
- // Mem information
- Mem struct {
- Total string `json:"total"` // e.g. 16gb
- TotalInBytes int `json:"total_in_bytes"` // e.g. 17179869184
- } `json:"mem"`
-
- // Swap information
- Swap struct {
- Total string `json:"total"` // e.g. 1gb
- TotalInBytes int `json:"total_in_bytes"` // e.g. 1073741824
- } `json:"swap"`
-}
-
-type NodesInfoNodeProcess struct {
- RefreshInterval string `json:"refresh_interval"` // e.g. 1s
- RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
- ID int `json:"id"` // process id, e.g. 87079
- MaxFileDescriptors int `json:"max_file_descriptors"` // e.g. 32768
- Mlockall bool `json:"mlockall"` // e.g. false
-}
-
-type NodesInfoNodeJVM struct {
- PID int `json:"pid"` // process id, e.g. 87079
- Version string `json:"version"` // e.g. "1.8.0_25"
- VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM"
- VMVersion string `json:"vm_version"` // e.g. "25.25-b02"
- VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation"
- StartTime time.Time `json:"start_time"` // e.g. "2015-01-03T15:18:30.982Z"
- StartTimeInMillis int64 `json:"start_time_in_millis"`
-
- // Mem information
- Mem struct {
- HeapInit string `json:"heap_init"` // e.g. 1gb
- HeapInitInBytes int `json:"heap_init_in_bytes"`
- HeapMax string `json:"heap_max"` // e.g. 4gb
- HeapMaxInBytes int `json:"heap_max_in_bytes"`
- NonHeapInit string `json:"non_heap_init"` // e.g. 2.4mb
- NonHeapInitInBytes int `json:"non_heap_init_in_bytes"`
- NonHeapMax string `json:"non_heap_max"` // e.g. 0b
- NonHeapMaxInBytes int `json:"non_heap_max_in_bytes"`
- DirectMax string `json:"direct_max"` // e.g. 4gb
- DirectMaxInBytes int `json:"direct_max_in_bytes"`
- } `json:"mem"`
-
- GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew"]
- MemoryPools []string `json:"memory_pools"` // e.g. ["Code Cache", "Metaspace"]
-}
-
-type NodesInfoNodeThreadPool struct {
- Percolate *NodesInfoNodeThreadPoolSection `json:"percolate"`
- Bench *NodesInfoNodeThreadPoolSection `json:"bench"`
- Listener *NodesInfoNodeThreadPoolSection `json:"listener"`
- Index *NodesInfoNodeThreadPoolSection `json:"index"`
- Refresh *NodesInfoNodeThreadPoolSection `json:"refresh"`
- Suggest *NodesInfoNodeThreadPoolSection `json:"suggest"`
- Generic *NodesInfoNodeThreadPoolSection `json:"generic"`
- Warmer *NodesInfoNodeThreadPoolSection `json:"warmer"`
- Search *NodesInfoNodeThreadPoolSection `json:"search"`
- Flush *NodesInfoNodeThreadPoolSection `json:"flush"`
- Optimize *NodesInfoNodeThreadPoolSection `json:"optimize"`
- Management *NodesInfoNodeThreadPoolSection `json:"management"`
- Get *NodesInfoNodeThreadPoolSection `json:"get"`
- Merge *NodesInfoNodeThreadPoolSection `json:"merge"`
- Bulk *NodesInfoNodeThreadPoolSection `json:"bulk"`
- Snapshot *NodesInfoNodeThreadPoolSection `json:"snapshot"`
-}
-
-type NodesInfoNodeThreadPoolSection struct {
- Type string `json:"type"` // e.g. fixed
- Min int `json:"min"` // e.g. 4
- Max int `json:"max"` // e.g. 4
- KeepAlive string `json:"keep_alive"` // e.g. "5m"
- QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1
-}
-
-type NodesInfoNodeNetwork struct {
- RefreshInterval string `json:"refresh_interval"` // e.g. 1s
- RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
- PrimaryInterface struct {
- Address string `json:"address"` // e.g. 192.168.1.2
- Name string `json:"name"` // e.g. en0
- MACAddress string `json:"mac_address"` // e.g. 11:22:33:44:55:66
- } `json:"primary_interface"`
-}
-
-type NodesInfoNodeTransport struct {
- BoundAddress []string `json:"bound_address"`
- PublishAddress string `json:"publish_address"`
- Profiles map[string]*NodesInfoNodeTransportProfile `json:"profiles"`
-}
-
-type NodesInfoNodeTransportProfile struct {
- BoundAddress []string `json:"bound_address"`
- PublishAddress string `json:"publish_address"`
-}
-
-type NodesInfoNodeHTTP struct {
- BoundAddress []string `json:"bound_address"` // e.g. ["127.0.0.1:9200", "[fe80::1]:9200", "[::1]:9200"]
- PublishAddress string `json:"publish_address"` // e.g. "127.0.0.1:9300"
- MaxContentLength string `json:"max_content_length"` // e.g. "100mb"
- MaxContentLengthInBytes int64 `json:"max_content_length_in_bytes"`
-}
-
-type NodesInfoNodePlugin struct {
- Name string `json:"name"`
- Description string `json:"description"`
- Site bool `json:"site"`
- JVM bool `json:"jvm"`
- URL string `json:"url"` // e.g. /_plugin/dummy/
-}
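A sketch of how the removed nodes-info service was consumed. It assumes a reachable cluster behind the client's default URL; the fields printed are among those defined in the structs above.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	// Fetch information about all nodes (the service defaults to "_all").
	info, err := client.NodesInfo().Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cluster:", info.ClusterName)
	for id, node := range info.Nodes {
		fmt.Printf("node %s: name=%s version=%s transport=%s\n",
			id, node.Name, node.Version, node.TransportAddress)
	}
}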
diff --git a/vendor/github.com/olivere/elastic/nodes_info_test.go b/vendor/github.com/olivere/elastic/nodes_info_test.go
deleted file mode 100644
index 41d997584..000000000
--- a/vendor/github.com/olivere/elastic/nodes_info_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestNodesInfo(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
-
- info, err := client.NodesInfo().Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if info == nil {
- t.Fatal("expected nodes info")
- }
-
- if info.ClusterName == "" {
- t.Errorf("expected cluster name; got: %q", info.ClusterName)
- }
- if len(info.Nodes) == 0 {
- t.Errorf("expected some nodes; got: %d", len(info.Nodes))
- }
- for id, node := range info.Nodes {
- if id == "" {
- t.Errorf("expected node id; got: %q", id)
- }
- if node == nil {
- t.Fatalf("expected node info; got: %v", node)
- }
- if node.IP == "" {
- t.Errorf("expected node IP; got: %q", node.IP)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/nodes_stats.go b/vendor/github.com/olivere/elastic/nodes_stats.go
deleted file mode 100644
index 7c5f0c9d6..000000000
--- a/vendor/github.com/olivere/elastic/nodes_stats.go
+++ /dev/null
@@ -1,703 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// NodesStatsService returns node statistics.
-// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-nodes-stats.html
-// for details.
-type NodesStatsService struct {
- client *Client
- pretty bool
- metric []string
- indexMetric []string
- nodeId []string
- completionFields []string
- fielddataFields []string
- fields []string
- groups *bool
- human *bool
- level string
- timeout string
- types []string
-}
-
-// NewNodesStatsService creates a new NodesStatsService.
-func NewNodesStatsService(client *Client) *NodesStatsService {
- return &NodesStatsService{
- client: client,
- }
-}
-
-// Metric limits the information returned to the specified metrics.
-func (s *NodesStatsService) Metric(metric ...string) *NodesStatsService {
- s.metric = append(s.metric, metric...)
- return s
-}
-
-// IndexMetric limits the information returned for the `indices` metric
-// to the specified index metrics. It is ignored unless the `indices`
-// (or `all`) metric is specified.
-func (s *NodesStatsService) IndexMetric(indexMetric ...string) *NodesStatsService {
- s.indexMetric = append(s.indexMetric, indexMetric...)
- return s
-}
-
-// NodeId is a list of node IDs or names to limit the returned information;
-// use `_local` to return information from the node you're connecting to,
-// leave empty to get information from all nodes.
-func (s *NodesStatsService) NodeId(nodeId ...string) *NodesStatsService {
- s.nodeId = append(s.nodeId, nodeId...)
- return s
-}
-
-// CompletionFields is a list of fields for `fielddata` and `suggest`
-// index metric (supports wildcards).
-func (s *NodesStatsService) CompletionFields(completionFields ...string) *NodesStatsService {
- s.completionFields = append(s.completionFields, completionFields...)
- return s
-}
-
-// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards).
-func (s *NodesStatsService) FielddataFields(fielddataFields ...string) *NodesStatsService {
- s.fielddataFields = append(s.fielddataFields, fielddataFields...)
- return s
-}
-
-// Fields is a list of fields for `fielddata` and `completion` index metric (supports wildcards).
-func (s *NodesStatsService) Fields(fields ...string) *NodesStatsService {
- s.fields = append(s.fields, fields...)
- return s
-}
-
-// Groups is a list of search groups for `search` index metric.
-func (s *NodesStatsService) Groups(groups bool) *NodesStatsService {
- s.groups = &groups
- return s
-}
-
-// Human indicates whether to return time and byte values in human-readable format.
-func (s *NodesStatsService) Human(human bool) *NodesStatsService {
- s.human = &human
- return s
-}
-
-// Level specifies whether to return indices stats aggregated at node, index or shard level.
-func (s *NodesStatsService) Level(level string) *NodesStatsService {
- s.level = level
- return s
-}
-
-// Timeout specifies an explicit operation timeout.
-func (s *NodesStatsService) Timeout(timeout string) *NodesStatsService {
- s.timeout = timeout
- return s
-}
-
-// Types is a list of document types for the `indexing` index metric.
-func (s *NodesStatsService) Types(types ...string) *NodesStatsService {
- s.types = append(s.types, types...)
- return s
-}
-
-// Pretty indicates whether the JSON response should be indented and human readable.
-func (s *NodesStatsService) Pretty(pretty bool) *NodesStatsService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *NodesStatsService) buildURL() (string, url.Values, error) {
- var err error
- var path string
-
- if len(s.nodeId) > 0 && len(s.metric) > 0 && len(s.indexMetric) > 0 {
- path, err = uritemplates.Expand("/_nodes/{node_id}/stats/{metric}/{index_metric}", map[string]string{
- "index_metric": strings.Join(s.indexMetric, ","),
- "node_id": strings.Join(s.nodeId, ","),
- "metric": strings.Join(s.metric, ","),
- })
- } else if len(s.nodeId) > 0 && len(s.metric) > 0 && len(s.indexMetric) == 0 {
- path, err = uritemplates.Expand("/_nodes/{node_id}/stats/{metric}", map[string]string{
- "node_id": strings.Join(s.nodeId, ","),
- "metric": strings.Join(s.metric, ","),
- })
- } else if len(s.nodeId) > 0 && len(s.metric) == 0 && len(s.indexMetric) > 0 {
- path, err = uritemplates.Expand("/_nodes/{node_id}/stats/_all/{index_metric}", map[string]string{
- "index_metric": strings.Join(s.indexMetric, ","),
- "node_id": strings.Join(s.nodeId, ","),
- })
- } else if len(s.nodeId) > 0 && len(s.metric) == 0 && len(s.indexMetric) == 0 {
- path, err = uritemplates.Expand("/_nodes/{node_id}/stats", map[string]string{
- "node_id": strings.Join(s.nodeId, ","),
- })
- } else if len(s.nodeId) == 0 && len(s.metric) > 0 && len(s.indexMetric) > 0 {
- path, err = uritemplates.Expand("/_nodes/stats/{metric}/{index_metric}", map[string]string{
- "index_metric": strings.Join(s.indexMetric, ","),
- "metric": strings.Join(s.metric, ","),
- })
- } else if len(s.nodeId) == 0 && len(s.metric) > 0 && len(s.indexMetric) == 0 {
- path, err = uritemplates.Expand("/_nodes/stats/{metric}", map[string]string{
- "metric": strings.Join(s.metric, ","),
- })
- } else if len(s.nodeId) == 0 && len(s.metric) == 0 && len(s.indexMetric) > 0 {
- path, err = uritemplates.Expand("/_nodes/stats/_all/{index_metric}", map[string]string{
- "index_metric": strings.Join(s.indexMetric, ","),
- })
- } else { // if len(s.nodeId) == 0 && len(s.metric) == 0 && len(s.indexMetric) == 0 {
- path = "/_nodes/stats"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if len(s.completionFields) > 0 {
- params.Set("completion_fields", strings.Join(s.completionFields, ","))
- }
- if len(s.fielddataFields) > 0 {
- params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
- }
- if len(s.fields) > 0 {
- params.Set("fields", strings.Join(s.fields, ","))
- }
- if s.groups != nil {
- params.Set("groups", fmt.Sprintf("%v", *s.groups))
- }
- if s.human != nil {
- params.Set("human", fmt.Sprintf("%v", *s.human))
- }
- if s.level != "" {
- params.Set("level", s.level)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if len(s.types) > 0 {
- params.Set("types", strings.Join(s.types, ","))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *NodesStatsService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *NodesStatsService) Do(ctx context.Context) (*NodesStatsResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(NodesStatsResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// NodesStatsResponse is the response of NodesStatsService.Do.
-type NodesStatsResponse struct {
- ClusterName string `json:"cluster_name"`
- Nodes map[string]*NodesStatsNode `json:"nodes"`
-}
-
-type NodesStatsNode struct {
- // Timestamp is when these stats were gathered.
- Timestamp int64 `json:"timestamp"`
- // Name of the node, e.g. "Mister Fear"
- Name string `json:"name"`
- // TransportAddress, e.g. "127.0.0.1:9300"
- TransportAddress string `json:"transport_address"`
- // Host is the host name, e.g. "macbookair"
- Host string `json:"host"`
- // IP is an IP address, e.g. "192.168.1.2"
- IP string `json:"ip"`
- // Roles is a list of the roles of the node, e.g. master, data, ingest.
- Roles []string `json:"roles"`
-
- // Attributes of the node.
- Attributes map[string]interface{} `json:"attributes"`
-
- // Indices returns index information.
- Indices *NodesStatsIndex `json:"indices"`
-
- // OS information, e.g. CPU and memory.
- OS *NodesStatsNodeOS `json:"os"`
-
- // Process information, e.g. max file descriptors.
- Process *NodesStatsNodeProcess `json:"process"`
-
- // JVM information, e.g. VM version.
- JVM *NodesStatsNodeJVM `json:"jvm"`
-
- // ThreadPool information.
- ThreadPool map[string]*NodesStatsNodeThreadPool `json:"thread_pool"`
-
- // FS returns information about the filesystem.
- FS *NodesStatsNodeFS `json:"fs"`
-
- // Transport information.
- Transport *NodesStatsNodeTransport `json:"transport"`
-
- // HTTP information.
- HTTP *NodesStatsNodeHTTP `json:"http"`
-
- // Breaker contains information about circuit breakers.
- Breaker map[string]*NodesStatsBreaker `json:"breakers"`
-
- // ScriptStats information.
- ScriptStats *NodesStatsScriptStats `json:"script"`
-
- // Discovery information.
- Discovery *NodesStatsDiscovery `json:"discovery"`
-
- // Ingest information
- Ingest *NodesStatsIngest `json:"ingest"`
-}
-
-type NodesStatsIndex struct {
- Docs *NodesStatsDocsStats `json:"docs"`
- Store *NodesStatsStoreStats `json:"store"`
- Indexing *NodesStatsIndexingStats `json:"indexing"`
- Get *NodesStatsGetStats `json:"get"`
- Search *NodesStatsSearchStats `json:"search"`
- Merges *NodesStatsMergeStats `json:"merges"`
- Refresh *NodesStatsRefreshStats `json:"refresh"`
- Flush *NodesStatsFlushStats `json:"flush"`
- Warmer *NodesStatsWarmerStats `json:"warmer"`
- QueryCache *NodesStatsQueryCacheStats `json:"query_cache"`
- Fielddata *NodesStatsFielddataStats `json:"fielddata"`
- Percolate *NodesStatsPercolateStats `json:"percolate"`
- Completion *NodesStatsCompletionStats `json:"completion"`
- Segments *NodesStatsSegmentsStats `json:"segments"`
- Translog *NodesStatsTranslogStats `json:"translog"`
- Suggest *NodesStatsSuggestStats `json:"suggest"`
- RequestCache *NodesStatsRequestCacheStats `json:"request_cache"`
- Recovery NodesStatsRecoveryStats `json:"recovery"`
-
- Indices map[string]*NodesStatsIndex `json:"indices"` // for level=indices
- Shards map[string]*NodesStatsIndex `json:"shards"` // for level=shards
-}
-
-type NodesStatsDocsStats struct {
- Count int64 `json:"count"`
- Deleted int64 `json:"deleted"`
-}
-
-type NodesStatsStoreStats struct {
- Size string `json:"size"`
- SizeInBytes int64 `json:"size_in_bytes"`
-}
-
-type NodesStatsIndexingStats struct {
- IndexTotal int64 `json:"index_total"`
- IndexTime string `json:"index_time"`
- IndexTimeInMillis int64 `json:"index_time_in_millis"`
- IndexCurrent int64 `json:"index_current"`
- IndexFailed int64 `json:"index_failed"`
- DeleteTotal int64 `json:"delete_total"`
- DeleteTime string `json:"delete_time"`
- DeleteTimeInMillis int64 `json:"delete_time_in_millis"`
- DeleteCurrent int64 `json:"delete_current"`
- NoopUpdateTotal int64 `json:"noop_update_total"`
-
- Types map[string]*NodesStatsIndexingStats `json:"types"` // stats for individual types
-}
-
-type NodesStatsGetStats struct {
- Total int64 `json:"total"`
- Time string `json:"get_time"`
- TimeInMillis int64 `json:"time_in_millis"`
- Exists int64 `json:"exists"`
- ExistsTime string `json:"exists_time"`
- ExistsTimeInMillis int64 `json:"exists_in_millis"`
- Missing int64 `json:"missing"`
- MissingTime string `json:"missing_time"`
- MissingTimeInMillis int64 `json:"missing_in_millis"`
- Current int64 `json:"current"`
-}
-
-type NodesStatsSearchStats struct {
- OpenContexts int64 `json:"open_contexts"`
- QueryTotal int64 `json:"query_total"`
- QueryTime string `json:"query_time"`
- QueryTimeInMillis int64 `json:"query_time_in_millis"`
- QueryCurrent int64 `json:"query_current"`
- FetchTotal int64 `json:"fetch_total"`
- FetchTime string `json:"fetch_time"`
- FetchTimeInMillis int64 `json:"fetch_time_in_millis"`
- FetchCurrent int64 `json:"fetch_current"`
- ScrollTotal int64 `json:"scroll_total"`
- ScrollTime string `json:"scroll_time"`
- ScrollTimeInMillis int64 `json:"scroll_time_in_millis"`
- ScrollCurrent int64 `json:"scroll_current"`
-
- Groups map[string]*NodesStatsSearchStats `json:"groups"` // stats for individual groups
-}
-
-type NodesStatsMergeStats struct {
- Current int64 `json:"current"`
- CurrentDocs int64 `json:"current_docs"`
- CurrentSize string `json:"current_size"`
- CurrentSizeInBytes int64 `json:"current_size_in_bytes"`
- Total int64 `json:"total"`
- TotalTime string `json:"total_time"`
- TotalTimeInMillis int64 `json:"total_time_in_millis"`
- TotalDocs int64 `json:"total_docs"`
- TotalSize string `json:"total_size"`
- TotalSizeInBytes int64 `json:"total_size_in_bytes"`
- TotalStoppedTime string `json:"total_stopped_time"`
- TotalStoppedTimeInMillis int64 `json:"total_stopped_time_in_millis"`
- TotalThrottledTime string `json:"total_throttled_time"`
- TotalThrottledTimeInMillis int64 `json:"total_throttled_time_in_millis"`
- TotalThrottleBytes string `json:"total_auto_throttle"`
- TotalThrottleBytesInBytes int64 `json:"total_auto_throttle_in_bytes"`
-}
-
-type NodesStatsRefreshStats struct {
- Total int64 `json:"total"`
- TotalTime string `json:"total_time"`
- TotalTimeInMillis int64 `json:"total_time_in_millis"`
-}
-
-type NodesStatsFlushStats struct {
- Total int64 `json:"total"`
- TotalTime string `json:"total_time"`
- TotalTimeInMillis int64 `json:"total_time_in_millis"`
-}
-
-type NodesStatsWarmerStats struct {
- Current int64 `json:"current"`
- Total int64 `json:"total"`
- TotalTime string `json:"total_time"`
- TotalTimeInMillis int64 `json:"total_time_in_millis"`
-}
-
-type NodesStatsQueryCacheStats struct {
- MemorySize string `json:"memory_size"`
- MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
- TotalCount int64 `json:"total_count"`
- HitCount int64 `json:"hit_count"`
- MissCount int64 `json:"miss_count"`
- CacheSize int64 `json:"cache_size"`
- CacheCount int64 `json:"cache_count"`
- Evictions int64 `json:"evictions"`
-}
-
-type NodesStatsFielddataStats struct {
- MemorySize string `json:"memory_size"`
- MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
- Evictions int64 `json:"evictions"`
- Fields map[string]struct {
- MemorySize string `json:"memory_size"`
- MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
- } `json:"fields"`
-}
-
-type NodesStatsPercolateStats struct {
- Total int64 `json:"total"`
- Time string `json:"time"`
- TimeInMillis int64 `json:"time_in_millis"`
- Current int64 `json:"current"`
- MemorySize string `json:"memory_size"`
- MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
- Queries int64 `json:"queries"`
-}
-
-type NodesStatsCompletionStats struct {
- Size string `json:"size"`
- SizeInBytes int64 `json:"size_in_bytes"`
- Fields map[string]struct {
- Size string `json:"size"`
- SizeInBytes int64 `json:"size_in_bytes"`
- } `json:"fields"`
-}
-
-type NodesStatsSegmentsStats struct {
- Count int64 `json:"count"`
- Memory string `json:"memory"`
- MemoryInBytes int64 `json:"memory_in_bytes"`
- TermsMemory string `json:"terms_memory"`
- TermsMemoryInBytes int64 `json:"terms_memory_in_bytes"`
- StoredFieldsMemory string `json:"stored_fields_memory"`
- StoredFieldsMemoryInBytes int64 `json:"stored_fields_memory_in_bytes"`
- TermVectorsMemory string `json:"term_vectors_memory"`
- TermVectorsMemoryInBytes int64 `json:"term_vectors_memory_in_bytes"`
- NormsMemory string `json:"norms_memory"`
- NormsMemoryInBytes int64 `json:"norms_memory_in_bytes"`
- DocValuesMemory string `json:"doc_values_memory"`
- DocValuesMemoryInBytes int64 `json:"doc_values_memory_in_bytes"`
- IndexWriterMemory string `json:"index_writer_memory"`
- IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"`
- IndexWriterMaxMemory string `json:"index_writer_max_memory"`
- IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"`
- VersionMapMemory string `json:"version_map_memory"`
- VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"`
- FixedBitSetMemory string `json:"fixed_bit_set"` // not a typo
- FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes"`
-}
-
-type NodesStatsTranslogStats struct {
- Operations int64 `json:"operations"`
- Size string `json:"size"`
- SizeInBytes int64 `json:"size_in_bytes"`
-}
-
-type NodesStatsSuggestStats struct {
- Total int64 `json:"total"`
- TotalTime string `json:"total_time"`
- TotalTimeInMillis int64 `json:"total_time_in_millis"`
- Current int64 `json:"current"`
-}
-
-type NodesStatsRequestCacheStats struct {
- MemorySize string `json:"memory_size"`
- MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
- Evictions int64 `json:"evictions"`
- HitCount int64 `json:"hit_count"`
- MissCount int64 `json:"miss_count"`
-}
-
-type NodesStatsRecoveryStats struct {
- CurrentAsSource int `json:"current_as_source"`
- CurrentAsTarget int `json:"current_as_target"`
-}
-
-type NodesStatsNodeOS struct {
- Timestamp int64 `json:"timestamp"`
- CPU *NodesStatsNodeOSCPU `json:"cpu"`
- Mem *NodesStatsNodeOSMem `json:"mem"`
- Swap *NodesStatsNodeOSSwap `json:"swap"`
-}
-
-type NodesStatsNodeOSCPU struct {
- Percent int `json:"percent"`
- LoadAverage map[string]float64 `json:"load_average"` // keys are: 1m, 5m, and 15m
-}
-
-type NodesStatsNodeOSMem struct {
- Total string `json:"total"`
- TotalInBytes int64 `json:"total_in_bytes"`
- Free string `json:"free"`
- FreeInBytes int64 `json:"free_in_bytes"`
- Used string `json:"used"`
- UsedInBytes int64 `json:"used_in_bytes"`
- FreePercent int `json:"free_percent"`
- UsedPercent int `json:"used_percent"`
-}
-
-type NodesStatsNodeOSSwap struct {
- Total string `json:"total"`
- TotalInBytes int64 `json:"total_in_bytes"`
- Free string `json:"free"`
- FreeInBytes int64 `json:"free_in_bytes"`
- Used string `json:"used"`
- UsedInBytes int64 `json:"used_in_bytes"`
-}
-
-type NodesStatsNodeProcess struct {
- Timestamp int64 `json:"timestamp"`
- OpenFileDescriptors int64 `json:"open_file_descriptors"`
- MaxFileDescriptors int64 `json:"max_file_descriptors"`
- CPU struct {
- Percent int `json:"percent"`
- Total string `json:"total"`
- TotalInMillis int64 `json:"total_in_millis"`
- } `json:"cpu"`
- Mem struct {
- TotalVirtual string `json:"total_virtual"`
- TotalVirtualInBytes int64 `json:"total_virtual_in_bytes"`
- } `json:"mem"`
-}
-
-type NodesStatsNodeJVM struct {
- Timestamp int64 `json:"timestamp"`
- Uptime string `json:"uptime"`
- UptimeInMillis int64 `json:"uptime_in_millis"`
- Mem *NodesStatsNodeJVMMem `json:"mem"`
- Threads *NodesStatsNodeJVMThreads `json:"threads"`
- GC *NodesStatsNodeJVMGC `json:"gc"`
- BufferPools map[string]*NodesStatsNodeJVMBufferPool `json:"buffer_pools"`
- Classes *NodesStatsNodeJVMClasses `json:"classes"`
-}
-
-type NodesStatsNodeJVMMem struct {
- HeapUsed string `json:"heap_used"`
- HeapUsedInBytes int64 `json:"heap_used_in_bytes"`
- HeapUsedPercent int `json:"heap_used_percent"`
- HeapCommitted string `json:"heap_committed"`
- HeapCommittedInBytes int64 `json:"heap_committed_in_bytes"`
- HeapMax string `json:"heap_max"`
- HeapMaxInBytes int64 `json:"heap_max_in_bytes"`
- NonHeapUsed string `json:"non_heap_used"`
- NonHeapUsedInBytes int64 `json:"non_heap_used_in_bytes"`
- NonHeapCommitted string `json:"non_heap_committed"`
- NonHeapCommittedInBytes int64 `json:"non_heap_committed_in_bytes"`
- Pools map[string]struct {
- Used string `json:"used"`
- UsedInBytes int64 `json:"used_in_bytes"`
- Max string `json:"max"`
- MaxInBytes int64 `json:"max_in_bytes"`
- PeakUsed string `json:"peak_used"`
- PeakUsedInBytes int64 `json:"peak_used_in_bytes"`
- PeakMax string `json:"peak_max"`
- PeakMaxInBytes int64 `json:"peak_max_in_bytes"`
- } `json:"pools"`
-}
-
-type NodesStatsNodeJVMThreads struct {
- Count int64 `json:"count"`
- PeakCount int64 `json:"peak_count"`
-}
-
-type NodesStatsNodeJVMGC struct {
- Collectors map[string]*NodesStatsNodeJVMGCCollector `json:"collectors"`
-}
-
-type NodesStatsNodeJVMGCCollector struct {
- CollectionCount int64 `json:"collection_count"`
- CollectionTime string `json:"collection_time"`
- CollectionTimeInMillis int64 `json:"collection_time_in_millis"`
-}
-
-type NodesStatsNodeJVMBufferPool struct {
- Count int64 `json:"count"`
- TotalCapacity string `json:"total_capacity"`
- TotalCapacityInBytes int64 `json:"total_capacity_in_bytes"`
-}
-
-type NodesStatsNodeJVMClasses struct {
- CurrentLoadedCount int64 `json:"current_loaded_count"`
- TotalLoadedCount int64 `json:"total_loaded_count"`
- TotalUnloadedCount int64 `json:"total_unloaded_count"`
-}
-
-type NodesStatsNodeThreadPool struct {
- Threads int `json:"threads"`
- Queue int `json:"queue"`
- Active int `json:"active"`
- Rejected int64 `json:"rejected"`
- Largest int `json:"largest"`
- Completed int64 `json:"completed"`
-}
-
-type NodesStatsNodeFS struct {
- Timestamp int64 `json:"timestamp"`
- Total *NodesStatsNodeFSEntry `json:"total"`
- Data []*NodesStatsNodeFSEntry `json:"data"`
- IOStats *NodesStatsNodeFSIOStats `json:"io_stats"`
-}
-
-type NodesStatsNodeFSEntry struct {
- Path string `json:"path"`
- Mount string `json:"mount"`
- Type string `json:"type"`
- Total string `json:"total"`
- TotalInBytes int64 `json:"total_in_bytes"`
- Free string `json:"free"`
- FreeInBytes int64 `json:"free_in_bytes"`
- Available string `json:"available"`
- AvailableInBytes int64 `json:"available_in_bytes"`
- Spins string `json:"spins"`
-}
-
-type NodesStatsNodeFSIOStats struct {
- Devices []*NodesStatsNodeFSIOStatsEntry `json:"devices"`
- Total *NodesStatsNodeFSIOStatsEntry `json:"total"`
-}
-
-type NodesStatsNodeFSIOStatsEntry struct {
- DeviceName string `json:"device_name"`
- Operations int64 `json:"operations"`
- ReadOperations int64 `json:"read_operations"`
- WriteOperations int64 `json:"write_operations"`
- ReadKilobytes int64 `json:"read_kilobytes"`
- WriteKilobytes int64 `json:"write_kilobytes"`
-}
-
-type NodesStatsNodeTransport struct {
- ServerOpen int `json:"server_open"`
- RxCount int64 `json:"rx_count"`
- RxSize string `json:"rx_size"`
- RxSizeInBytes int64 `json:"rx_size_in_bytes"`
- TxCount int64 `json:"tx_count"`
- TxSize string `json:"tx_size"`
- TxSizeInBytes int64 `json:"tx_size_in_bytes"`
-}
-
-type NodesStatsNodeHTTP struct {
- CurrentOpen int `json:"current_open"`
- TotalOpened int `json:"total_opened"`
-}
-
-type NodesStatsBreaker struct {
- LimitSize string `json:"limit_size"`
- LimitSizeInBytes int64 `json:"limit_size_in_bytes"`
- EstimatedSize string `json:"estimated_size"`
- EstimatedSizeInBytes int64 `json:"estimated_size_in_bytes"`
- Overhead float64 `json:"overhead"`
- Tripped int64 `json:"tripped"`
-}
-
-type NodesStatsScriptStats struct {
- Compilations int64 `json:"compilations"`
- CacheEvictions int64 `json:"cache_evictions"`
-}
-
-type NodesStatsDiscovery struct {
- ClusterStateQueue *NodesStatsDiscoveryStats `json:"cluster_state_queue"`
-}
-
-type NodesStatsDiscoveryStats struct {
- Total int64 `json:"total"`
- Pending int64 `json:"pending"`
- Committed int64 `json:"committed"`
-}
-
-type NodesStatsIngest struct {
- Total *NodesStatsIngestStats `json:"total"`
- Pipelines interface{} `json:"pipelines"`
-}
-
-type NodesStatsIngestStats struct {
- Count int64 `json:"count"`
- Time string `json:"time"`
- TimeInMillis int64 `json:"time_in_millis"`
- Current int64 `json:"current"`
- Failed int64 `json:"failed"`
-}
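A sketch of a typical call against the removed nodes-stats service, restricting the output to the "jvm" and "os" metric groups. The metric names are standard Elasticsearch metric identifiers; the cluster address and the decision to request human-readable sizes are assumptions for the example.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	// Only the "jvm" and "os" metric groups, with human-readable sizes.
	stats, err := client.NodesStats().Metric("jvm", "os").Human(true).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for id, node := range stats.Nodes {
		if node.JVM != nil && node.JVM.Mem != nil {
			fmt.Printf("node %s: heap used %s (%d%%)\n",
				id, node.JVM.Mem.HeapUsed, node.JVM.Mem.HeapUsedPercent)
		}
	}
}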
diff --git a/vendor/github.com/olivere/elastic/nodes_stats_test.go b/vendor/github.com/olivere/elastic/nodes_stats_test.go
deleted file mode 100644
index 4b249a2f4..000000000
--- a/vendor/github.com/olivere/elastic/nodes_stats_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestNodesStats(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
-
- info, err := client.NodesStats().Human(true).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if info == nil {
- t.Fatal("expected nodes stats")
- }
-
- if info.ClusterName == "" {
- t.Errorf("expected cluster name; got: %q", info.ClusterName)
- }
- if len(info.Nodes) == 0 {
- t.Errorf("expected some nodes; got: %d", len(info.Nodes))
- }
- for id, node := range info.Nodes {
- if id == "" {
- t.Errorf("expected node id; got: %q", id)
- }
- if node == nil {
- t.Fatalf("expected node info; got: %v", node)
- }
- if len(node.Name) == 0 {
- t.Errorf("expected node name; got: %q", node.Name)
- }
- if node.Timestamp == 0 {
- t.Errorf("expected timestamp; got: %q", node.Timestamp)
- }
- }
-}
-
-func TestNodesStatsBuildURL(t *testing.T) {
- tests := []struct {
- NodeIds []string
- Metrics []string
- IndexMetrics []string
- Expected string
- }{
- {
- NodeIds: nil,
- Metrics: nil,
- IndexMetrics: nil,
- Expected: "/_nodes/stats",
- },
- {
- NodeIds: []string{"node1"},
- Metrics: nil,
- IndexMetrics: nil,
- Expected: "/_nodes/node1/stats",
- },
- {
- NodeIds: []string{"node1", "node2"},
- Metrics: nil,
- IndexMetrics: nil,
- Expected: "/_nodes/node1%2Cnode2/stats",
- },
- {
- NodeIds: nil,
- Metrics: []string{"indices"},
- IndexMetrics: nil,
- Expected: "/_nodes/stats/indices",
- },
- {
- NodeIds: nil,
- Metrics: []string{"indices", "jvm"},
- IndexMetrics: nil,
- Expected: "/_nodes/stats/indices%2Cjvm",
- },
- {
- NodeIds: []string{"node1"},
- Metrics: []string{"indices", "jvm"},
- IndexMetrics: nil,
- Expected: "/_nodes/node1/stats/indices%2Cjvm",
- },
- {
- NodeIds: nil,
- Metrics: nil,
- IndexMetrics: []string{"fielddata"},
- Expected: "/_nodes/stats/_all/fielddata",
- },
- {
- NodeIds: []string{"node1"},
- Metrics: nil,
- IndexMetrics: []string{"fielddata"},
- Expected: "/_nodes/node1/stats/_all/fielddata",
- },
- {
- NodeIds: nil,
- Metrics: []string{"indices"},
- IndexMetrics: []string{"fielddata"},
- Expected: "/_nodes/stats/indices/fielddata",
- },
- {
- NodeIds: []string{"node1"},
- Metrics: []string{"indices"},
- IndexMetrics: []string{"fielddata"},
- Expected: "/_nodes/node1/stats/indices/fielddata",
- },
- {
- NodeIds: []string{"node1", "node2"},
- Metrics: []string{"indices", "jvm"},
- IndexMetrics: []string{"fielddata", "docs"},
- Expected: "/_nodes/node1%2Cnode2/stats/indices%2Cjvm/fielddata%2Cdocs",
- },
- }
-
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
- for i, tt := range tests {
- svc := client.NodesStats().NodeId(tt.NodeIds...).Metric(tt.Metrics...).IndexMetric(tt.IndexMetrics...)
- path, _, err := svc.buildURL()
- if err != nil {
- t.Errorf("#%d: expected no error, got %v", i, err)
- } else {
- if want, have := tt.Expected, path; want != have {
- t.Errorf("#%d: expected %q, got %q", i, want, have)
- }
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/percolate_test.go b/vendor/github.com/olivere/elastic/percolate_test.go
deleted file mode 100644
index 3b3b2efb7..000000000
--- a/vendor/github.com/olivere/elastic/percolate_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestPercolate(t *testing.T) {
- //client := setupTestClientAndCreateIndex(t, SetErrorLog(log.New(os.Stdout, "", 0)))
- //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- // Create query index
- createQueryIndex, err := client.CreateIndex(testQueryIndex).Body(testQueryMapping).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if createQueryIndex == nil {
- t.Errorf("expected result to be != nil; got: %v", createQueryIndex)
- }
-
- // Add a document
- _, err = client.Index().
- Index(testQueryIndex).
- Type("doc").
- Id("1").
- BodyJson(`{"query":{"match":{"message":"bonsai tree"}}}`).
- Refresh("wait_for").
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Percolate should return our registered query
- pq := NewPercolatorQuery().
- Field("query").
- DocumentType("doc").
- Document(doctype{Message: "A new bonsai tree in the office"})
- res, err := client.Search(testQueryIndex).Type("doc").Query(pq).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected results != nil; got nil")
- }
- if res.Hits == nil {
- t.Fatal("expected SearchResult.Hits != nil; got nil")
- }
- if got, want := res.Hits.TotalHits, int64(1); got != want {
- t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
- }
- if got, want := len(res.Hits.Hits), 1; got != want {
- t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
- }
- hit := res.Hits.Hits[0]
- if hit.Index != testQueryIndex {
- t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testQueryIndex, hit.Index)
- }
- got := string(*hit.Source)
- expected := `{"query":{"match":{"message":"bonsai tree"}}}`
- if got != expected {
- t.Fatalf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/ping.go b/vendor/github.com/olivere/elastic/ping.go
deleted file mode 100644
index 5c2d34f00..000000000
--- a/vendor/github.com/olivere/elastic/ping.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "net/http"
- "net/url"
-)
-
-// PingService checks if an Elasticsearch server on a given URL is alive.
-// When asked for, it can also return various information about the
-// Elasticsearch server, e.g. the Elasticsearch version number.
-//
-// Ping simply issues an HTTP GET request to the URL of the server.
-// If the server responds with HTTP Status code 200 OK, the server is alive.
-type PingService struct {
- client *Client
- url string
- timeout string
- httpHeadOnly bool
- pretty bool
-}
-
-// PingResult is the result returned from querying the Elasticsearch server.
-type PingResult struct {
- Name string `json:"name"`
- ClusterName string `json:"cluster_name"`
- Version struct {
- Number string `json:"number"`
- BuildHash string `json:"build_hash"`
- BuildTimestamp string `json:"build_timestamp"`
- BuildSnapshot bool `json:"build_snapshot"`
- LuceneVersion string `json:"lucene_version"`
- } `json:"version"`
- TagLine string `json:"tagline"`
-}
-
-func NewPingService(client *Client) *PingService {
- return &PingService{
- client: client,
- url: DefaultURL,
- httpHeadOnly: false,
- pretty: false,
- }
-}
-
-func (s *PingService) URL(url string) *PingService {
- s.url = url
- return s
-}
-
-func (s *PingService) Timeout(timeout string) *PingService {
- s.timeout = timeout
- return s
-}
-
-// HttpHeadOnly makes the service only return the status code in Do;
-// the PingResult will be nil.
-func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService {
- s.httpHeadOnly = httpHeadOnly
- return s
-}
-
-func (s *PingService) Pretty(pretty bool) *PingService {
- s.pretty = pretty
- return s
-}
-
-// Do returns the PingResult, the HTTP status code of the Elasticsearch
-// server, and an error.
-func (s *PingService) Do(ctx context.Context) (*PingResult, int, error) {
- s.client.mu.RLock()
- basicAuth := s.client.basicAuth
- basicAuthUsername := s.client.basicAuthUsername
- basicAuthPassword := s.client.basicAuthPassword
- s.client.mu.RUnlock()
-
- url_ := s.url + "/"
-
- params := make(url.Values)
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.pretty {
- params.Set("pretty", "true")
- }
- if len(params) > 0 {
- url_ += "?" + params.Encode()
- }
-
- var method string
- if s.httpHeadOnly {
- method = "HEAD"
- } else {
- method = "GET"
- }
-
- // Notice: This service must NOT use PerformRequest!
- req, err := NewRequest(method, url_)
- if err != nil {
- return nil, 0, err
- }
-
- if basicAuth {
- req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
- }
-
- res, err := s.client.c.Do((*http.Request)(req).WithContext(ctx))
- if err != nil {
- return nil, 0, err
- }
- defer res.Body.Close()
-
- var ret *PingResult
- if !s.httpHeadOnly {
- ret = new(PingResult)
- if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
- return nil, res.StatusCode, err
- }
- }
-
- return ret, res.StatusCode, nil
-}
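For completeness, a sketch of a ping round-trip with the service above. DefaultURL is the client's default endpoint; error handling is deliberately minimal.

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	res, code, err := client.Ping(elastic.DefaultURL).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if code != http.StatusOK {
		log.Fatalf("unexpected status code %d", code)
	}
	fmt.Printf("connected to %q, Elasticsearch %s\n", res.ClusterName, res.Version.Number)
}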
diff --git a/vendor/github.com/olivere/elastic/ping_test.go b/vendor/github.com/olivere/elastic/ping_test.go
deleted file mode 100644
index 273913803..000000000
--- a/vendor/github.com/olivere/elastic/ping_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "net/http"
- "testing"
-)
-
-func TestPingGet(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- res, code, err := client.Ping(DefaultURL).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if code != http.StatusOK {
- t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
- }
- if res == nil {
- t.Fatalf("expected to return result, got: %v", res)
- }
- if res.Name == "" {
- t.Errorf("expected Name != \"\"; got %q", res.Name)
- }
- if res.Version.Number == "" {
- t.Errorf("expected Version.Number != \"\"; got %q", res.Version.Number)
- }
-}
-
-func TestPingHead(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- res, code, err := client.Ping(DefaultURL).HttpHeadOnly(true).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if code != http.StatusOK {
- t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
- }
- if res != nil {
- t.Errorf("expected not to return result, got: %v", res)
- }
-}
-
-func TestPingHeadFailure(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- res, code, err := client.
- Ping("http://127.0.0.1:9299").
- HttpHeadOnly(true).
- Do(context.TODO())
- if err == nil {
- t.Error("expected error, got nil")
- }
- if code == http.StatusOK {
- t.Errorf("expected status code != %d; got %d", http.StatusOK, code)
- }
- if res != nil {
- t.Errorf("expected not to return result, got: %v", res)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/plugins.go b/vendor/github.com/olivere/elastic/plugins.go
deleted file mode 100644
index 60bda7552..000000000
--- a/vendor/github.com/olivere/elastic/plugins.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "context"
-
-// HasPlugin indicates whether the cluster has the named plugin.
-func (c *Client) HasPlugin(name string) (bool, error) {
- plugins, err := c.Plugins()
- if err != nil {
- return false, nil
- }
- for _, plugin := range plugins {
- if plugin == name {
- return true, nil
- }
- }
- return false, nil
-}
-
-// Plugins returns the list of all registered plugins.
-func (c *Client) Plugins() ([]string, error) {
- stats, err := c.ClusterStats().Do(context.Background())
- if err != nil {
- return nil, err
- }
- if stats == nil {
- return nil, err
- }
- if stats.Nodes == nil {
- return nil, err
- }
- var plugins []string
- for _, plugin := range stats.Nodes.Plugins {
- plugins = append(plugins, plugin.Name)
- }
- return plugins, nil
-}
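A short, hedged usage sketch for the plugin helpers above (assumes a running cluster on the default URL; the plugin name is only an example):

package main

import (
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Plugins lists every plugin reported via the cluster stats API.
	plugins, err := client.Plugins()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("installed plugins:", plugins)

	// HasPlugin checks for a single plugin by name.
	ok, err := client.HasPlugin("analysis-icu")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("analysis-icu installed:", ok)
}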
diff --git a/vendor/github.com/olivere/elastic/plugins_test.go b/vendor/github.com/olivere/elastic/plugins_test.go
deleted file mode 100644
index 969f0b0e5..000000000
--- a/vendor/github.com/olivere/elastic/plugins_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-func TestClientPlugins(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Plugins()
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestClientHasPlugin(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
- found, err := client.HasPlugin("no-such-plugin")
- if err != nil {
- t.Fatal(err)
- }
- if found {
- t.Fatalf("expected to not find plugin %q", "no-such-plugin")
- }
-}
diff --git a/vendor/github.com/olivere/elastic/query.go b/vendor/github.com/olivere/elastic/query.go
deleted file mode 100644
index ad01354a0..000000000
--- a/vendor/github.com/olivere/elastic/query.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// Query represents the generic query interface. A query's sole purpose
-// is to return the source of the query as a JSON-serializable object.
-// Returning map[string]interface{} is the norm for queries.
-type Query interface {
- // Source returns the JSON-serializable query request.
- Source() (interface{}, error)
-}
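Because Query is a single-method interface, callers can supply their own query types. A self-contained sketch (the rawQuery type is hypothetical, not part of the library):

package main

import (
	"encoding/json"
	"fmt"
)

// rawQuery wraps an already-built query body; its Source method satisfies
// the Query interface shown above.
type rawQuery struct {
	body map[string]interface{}
}

// Source returns the JSON-serializable query body.
func (q rawQuery) Source() (interface{}, error) {
	return q.body, nil
}

func main() {
	q := rawQuery{body: map[string]interface{}{
		"term": map[string]interface{}{"user": "olivere"},
	}}
	src, _ := q.Source()
	b, _ := json.Marshal(src)
	fmt.Println(string(b)) // {"term":{"user":"olivere"}}
}

Such a value can then be passed anywhere the client expects an elastic.Query, e.g. a search or reindex source.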
diff --git a/vendor/github.com/olivere/elastic/recipes/bulk_insert/bulk_insert.go b/vendor/github.com/olivere/elastic/recipes/bulk_insert/bulk_insert.go
deleted file mode 100644
index 5a8ab39d0..000000000
--- a/vendor/github.com/olivere/elastic/recipes/bulk_insert/bulk_insert.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-// BulkInsert illustrates how to bulk insert documents into Elasticsearch.
-//
-// It uses two goroutines to do so. The first creates a simple document
-// and sends it to the second via a channel. The second goroutine collects
-// those documents, creates a bulk request that is added to a Bulk service
-// and committed to Elasticsearch after reaching a number of documents.
-// The number of documents after which a commit happens can be specified
-// via the "bulk-size" flag.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
-// for details on the Bulk API in Elasticsearch.
-//
-// Example
-//
-// Bulk index 100,000 documents into the index "warehouse", type "product",
-// committing every set of 1,000 documents.
-//
-// bulk_insert -index=warehouse -type=product -n=100000 -bulk-size=1000
-//
-package main
-
-import (
- "context"
- "encoding/base64"
- "errors"
- "flag"
- "fmt"
- "log"
- "math/rand"
- "sync/atomic"
- "time"
-
- "golang.org/x/sync/errgroup"
- "github.com/olivere/elastic"
-)
-
-func main() {
- var (
- url = flag.String("url", "http://localhost:9200", "Elasticsearch URL")
- index = flag.String("index", "", "Elasticsearch index name")
- typ = flag.String("type", "", "Elasticsearch type name")
- sniff = flag.Bool("sniff", true, "Enable or disable sniffing")
- n = flag.Int("n", 0, "Number of documents to bulk insert")
- bulkSize = flag.Int("bulk-size", 0, "Number of documents to collect before committing")
- )
- flag.Parse()
- log.SetFlags(0)
- rand.Seed(time.Now().UnixNano())
-
- if *url == "" {
- log.Fatal("missing url parameter")
- }
- if *index == "" {
- log.Fatal("missing index parameter")
- }
- if *typ == "" {
- log.Fatal("missing type parameter")
- }
- if *n <= 0 {
- log.Fatal("n must be a positive number")
- }
- if *bulkSize <= 0 {
- log.Fatal("bulk-size must be a positive number")
- }
-
- // Create an Elasticsearch client
- client, err := elastic.NewClient(elastic.SetURL(*url), elastic.SetSniff(*sniff))
- if err != nil {
- log.Fatal(err)
- }
-
- // Setup a group of goroutines from the excellent errgroup package
- g, ctx := errgroup.WithContext(context.TODO())
-
- // The first goroutine will emit documents and send them to the second goroutine
- // via the docsc channel.
- // The second Goroutine will simply bulk insert the documents.
- type doc struct {
- ID string `json:"id"`
- Timestamp time.Time `json:"@timestamp"`
- }
- docsc := make(chan doc)
-
- begin := time.Now()
-
- // Goroutine to create documents
- g.Go(func() error {
- defer close(docsc)
-
- buf := make([]byte, 32)
- for i := 0; i < *n; i++ {
- // Generate a random ID
- _, err := rand.Read(buf)
- if err != nil {
- return err
- }
- id := base64.URLEncoding.EncodeToString(buf)
-
- // Construct the document
- d := doc{
- ID: id,
- Timestamp: time.Now(),
- }
-
- // Send over to 2nd goroutine, or cancel
- select {
- case docsc <- d:
- case <-ctx.Done():
- return ctx.Err()
- }
- }
- return nil
- })
-
- // Second goroutine will consume the documents sent from the first and bulk insert into ES
- var total uint64
- g.Go(func() error {
- bulk := client.Bulk().Index(*index).Type(*typ)
- for d := range docsc {
- // Simple progress
- current := atomic.AddUint64(&total, 1)
- dur := time.Since(begin).Seconds()
- sec := int(dur)
- pps := int64(float64(current) / dur)
- fmt.Printf("%10d | %6d req/s | %02d:%02d\r", current, pps, sec/60, sec%60)
-
- // Enqueue the document
- bulk.Add(elastic.NewBulkIndexRequest().Id(d.ID).Doc(d))
- if bulk.NumberOfActions() >= *bulkSize {
- // Commit
- res, err := bulk.Do(ctx)
- if err != nil {
- return err
- }
- if res.Errors {
- // Look up the failed documents with res.Failed(), and e.g. recommit
- return errors.New("bulk commit failed")
- }
- // "bulk" is reset after Do, so you can reuse it
- }
-
- select {
- default:
- case <-ctx.Done():
- return ctx.Err()
- }
- }
-
- // Commit the final batch before exiting
- if bulk.NumberOfActions() > 0 {
- _, err = bulk.Do(ctx)
- if err != nil {
- return err
- }
- }
- return nil
- })
-
- // Wait until all goroutines are finished
- if err := g.Wait(); err != nil {
- log.Fatal(err)
- }
-
- // Final results
- dur := time.Since(begin).Seconds()
- sec := int(dur)
- pps := int64(float64(total) / dur)
- fmt.Printf("%10d | %6d req/s | %02d:%02d\n", total, pps, sec/60, sec%60)
-}
diff --git a/vendor/github.com/olivere/elastic/recipes/bulk_processor/main.go b/vendor/github.com/olivere/elastic/recipes/bulk_processor/main.go
deleted file mode 100644
index f13243297..000000000
--- a/vendor/github.com/olivere/elastic/recipes/bulk_processor/main.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-// BulkProcessor runs a bulk processing job that fills an index
-// given certain criteria like flush interval etc.
-//
-// Example
-//
-// bulk_processor -url=http://127.0.0.1:9200/bulk-processor-test?sniff=false -n=100000 -flush-interval=1s
-//
-package main
-
-import (
- "context"
- "flag"
- "fmt"
- "log"
- "math/rand"
- "os"
- "os/signal"
- "sync/atomic"
- "syscall"
- "time"
-
- "github.com/google/uuid"
-
- "github.com/olivere/elastic"
- "github.com/olivere/elastic/config"
-)
-
-func main() {
- var (
- url = flag.String("url", "http://localhost:9200/bulk-processor-test", "Elasticsearch URL")
- numWorkers = flag.Int("num-workers", 4, "Number of workers")
- n = flag.Int64("n", -1, "Number of documents to process (-1 for unlimited)")
- flushInterval = flag.Duration("flush-interval", 1*time.Second, "Flush interval")
- bulkActions = flag.Int("bulk-actions", 0, "Number of bulk actions before committing")
- bulkSize = flag.Int("bulk-size", 0, "Size of bulk requests before committing")
- )
- flag.Parse()
- log.SetFlags(0)
-
- rand.Seed(time.Now().UnixNano())
-
- // Parse configuration from URL
- cfg, err := config.Parse(*url)
- if err != nil {
- log.Fatal(err)
- }
-
- // Create an Elasticsearch client from the parsed config
- client, err := elastic.NewClientFromConfig(cfg)
- if err != nil {
- log.Fatal(err)
- }
-
- // Drop old index
- exists, err := client.IndexExists(cfg.Index).Do(context.Background())
- if err != nil {
- log.Fatal(err)
- }
- if exists {
- _, err = client.DeleteIndex(cfg.Index).Do(context.Background())
- if err != nil {
- log.Fatal(err)
- }
- }
-
- // Create processor
- bulkp := elastic.NewBulkProcessorService(client).
- Name("bulk-test-processor").
- Stats(true).
- Backoff(elastic.StopBackoff{}).
- FlushInterval(*flushInterval).
- Workers(*numWorkers)
- if *bulkActions > 0 {
- bulkp = bulkp.BulkActions(*bulkActions)
- }
- if *bulkSize > 0 {
- bulkp = bulkp.BulkSize(*bulkSize)
- }
- p, err := bulkp.Do(context.Background())
- if err != nil {
- log.Fatal(err)
- }
-
- var created int64
- errc := make(chan error, 1)
- go func() {
- c := make(chan os.Signal, 1)
- signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
- <-c
- errc <- nil
- }()
-
- go func() {
- defer func() {
- if err := p.Close(); err != nil {
- errc <- err
- }
- }()
-
- type Doc struct {
- Timestamp time.Time `json:"@timestamp"`
- }
-
- for {
- current := atomic.AddInt64(&created, 1)
- if *n > 0 && current >= *n {
- errc <- nil
- return
- }
- r := elastic.NewBulkIndexRequest().
- Index(cfg.Index).
- Type("doc").
- Id(uuid.New().String()).
- Doc(Doc{Timestamp: time.Now()})
- p.Add(r)
-
- time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)
- }
- }()
-
- go func() {
- t := time.NewTicker(1 * time.Second)
- defer t.Stop()
- for range t.C {
- stats := p.Stats()
- written := atomic.LoadInt64(&created)
- var queued int64
- for _, w := range stats.Workers {
- queued += w.Queued
- }
- fmt.Printf("Queued=%5d Written=%8d Succeeded=%8d Failed=%8d Comitted=%6d Flushed=%6d\n",
- queued,
- written,
- stats.Succeeded,
- stats.Failed,
- stats.Committed,
- stats.Flushed,
- )
- }
- }()
-
- if err := <-errc; err != nil {
- log.Fatal(err)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/recipes/connect/connect.go b/vendor/github.com/olivere/elastic/recipes/connect/connect.go
deleted file mode 100644
index baff6c114..000000000
--- a/vendor/github.com/olivere/elastic/recipes/connect/connect.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-// Connect simply connects to Elasticsearch.
-//
-// Example
-//
-//
-// connect -url=http://127.0.0.1:9200 -sniff=false
-//
-package main
-
-import (
- "flag"
- "fmt"
- "log"
-
- "github.com/olivere/elastic"
-)
-
-func main() {
- var (
- url = flag.String("url", "http://localhost:9200", "Elasticsearch URL")
- sniff = flag.Bool("sniff", true, "Enable or disable sniffing")
- )
- flag.Parse()
- log.SetFlags(0)
-
- if *url == "" {
- *url = "http://127.0.0.1:9200"
- }
-
- // Create an Elasticsearch client
- client, err := elastic.NewClient(elastic.SetURL(*url), elastic.SetSniff(*sniff))
- if err != nil {
- log.Fatal(err)
- }
- _ = client
-
- // Just a status message
- fmt.Println("Connection succeeded")
-}
diff --git a/vendor/github.com/olivere/elastic/recipes/sliced_scroll/sliced_scroll.go b/vendor/github.com/olivere/elastic/recipes/sliced_scroll/sliced_scroll.go
deleted file mode 100644
index d753a61cb..000000000
--- a/vendor/github.com/olivere/elastic/recipes/sliced_scroll/sliced_scroll.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-// SlicedScroll illustrates scrolling through a set of documents
-// in parallel. It uses the sliced scrolling feature introduced
-// in Elasticsearch 5.0 to create a number of Goroutines, each
-// scrolling through a slice of the total results. A second goroutine
-// receives the hits from the set of goroutines scrolling through
-// the slices and simply counts the total number and the number of
-// documents received per slice.
-//
-// The speedup of sliced scrolling can be significant but is very
-// dependent on the specific use case.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#sliced-scroll
-// for details on sliced scrolling in Elasticsearch.
-//
-// Example
-//
-// Scroll with 4 parallel slices through an index called "products".
-// Use "_uid" as the default field:
-//
-// sliced_scroll -index=products -n=4
-//
-package main
-
-import (
- "context"
- "flag"
- "fmt"
- "io"
- "log"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/sync/errgroup"
- "github.com/olivere/elastic"
-)
-
-func main() {
- var (
- url = flag.String("url", "http://localhost:9200", "Elasticsearch URL")
- index = flag.String("index", "", "Elasticsearch index name")
- typ = flag.String("type", "", "Elasticsearch type name")
- field = flag.String("field", "", "Slice field (must be numeric)")
- numSlices = flag.Int("n", 2, "Number of slices to use in parallel")
- sniff = flag.Bool("sniff", true, "Enable or disable sniffing")
- )
- flag.Parse()
- log.SetFlags(0)
-
- if *url == "" {
- log.Fatal("missing url parameter")
- }
- if *index == "" {
- log.Fatal("missing index parameter")
- }
- if *numSlices <= 0 {
- log.Fatal("n must be greater than zero")
- }
-
- // Create an Elasticsearch client
- client, err := elastic.NewClient(elastic.SetURL(*url), elastic.SetSniff(*sniff))
- if err != nil {
- log.Fatal(err)
- }
-
- // Setup a group of goroutines from the excellent errgroup package
- g, ctx := errgroup.WithContext(context.TODO())
-
- // Hits channel will be sent to from the first set of goroutines and consumed by the second
- type hit struct {
- Slice int
- Hit elastic.SearchHit
- }
- hitsc := make(chan hit)
-
- begin := time.Now()
-
- // Start a number of goroutines to parallelize scrolling
- var wg sync.WaitGroup
- for i := 0; i < *numSlices; i++ {
- wg.Add(1)
-
- slice := i
-
- // Prepare the query
- var query elastic.Query
- if *typ == "" {
- query = elastic.NewMatchAllQuery()
- } else {
- query = elastic.NewTypeQuery(*typ)
- }
-
- // Prepare the slice
- sliceQuery := elastic.NewSliceQuery().Id(i).Max(*numSlices)
- if *field != "" {
- sliceQuery = sliceQuery.Field(*field)
- }
-
- // Start goroutine for this sliced scroll
- g.Go(func() error {
- defer wg.Done()
- svc := client.Scroll(*index).Query(query).Slice(sliceQuery)
- for {
- res, err := svc.Do(ctx)
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
- for _, searchHit := range res.Hits.Hits {
- // Pass the hit to the hits channel, which will be consumed below
- select {
- case hitsc <- hit{Slice: slice, Hit: *searchHit}:
- case <-ctx.Done():
- return ctx.Err()
- }
- }
- }
- return nil
- })
- }
- go func() {
- // Wait until all scrolling is done
- wg.Wait()
- close(hitsc)
- }()
-
- // Second goroutine will consume the hits sent from the workers in the first set of goroutines
- var total uint64
- totals := make([]uint64, *numSlices)
- g.Go(func() error {
- for hit := range hitsc {
- // We simply count the hits here.
- atomic.AddUint64(&totals[hit.Slice], 1)
- current := atomic.AddUint64(&total, 1)
- sec := int(time.Since(begin).Seconds())
- fmt.Printf("%8d | %02d:%02d\r", current, sec/60, sec%60)
- select {
- default:
- case <-ctx.Done():
- return ctx.Err()
- }
- }
- return nil
- })
-
- // Wait until all goroutines are finished
- if err := g.Wait(); err != nil {
- log.Fatal(err)
- }
-
- fmt.Printf("Scrolled through a total of %d documents in %v\n", total, time.Since(begin))
- for i := 0; i < *numSlices; i++ {
- fmt.Printf("Slice %2d received %d documents\n", i, totals[i])
- }
-}
diff --git a/vendor/github.com/olivere/elastic/reindex.go b/vendor/github.com/olivere/elastic/reindex.go
deleted file mode 100644
index 9cdd50a68..000000000
--- a/vendor/github.com/olivere/elastic/reindex.go
+++ /dev/null
@@ -1,695 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
-)
-
-// ReindexService copies documents from one index to another.
-// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-reindex.html.
-type ReindexService struct {
- client *Client
- pretty bool
- refresh string
- timeout string
- waitForActiveShards string
- waitForCompletion *bool
- requestsPerSecond *int
- slices *int
- body interface{}
- source *ReindexSource
- destination *ReindexDestination
- conflicts string
- size *int
- script *Script
-}
-
-// NewReindexService creates a new ReindexService.
-func NewReindexService(client *Client) *ReindexService {
- return &ReindexService{
- client: client,
- }
-}
-
-// WaitForActiveShards sets the number of shard copies that must be active before
-// proceeding with the reindex operation. Defaults to 1, meaning the primary shard only.
-// Set to `all` for all shard copies, otherwise set to any non-negative value less than or
-// equal to the total number of copies for the shard (number of replicas + 1).
-func (s *ReindexService) WaitForActiveShards(waitForActiveShards string) *ReindexService {
- s.waitForActiveShards = waitForActiveShards
- return s
-}
-
-// RequestsPerSecond specifies the throttle to set on this request in sub-requests per second.
-// Use -1 to disable throttling; "unlimited" is also accepted and is the only non-numeric value allowed.
-func (s *ReindexService) RequestsPerSecond(requestsPerSecond int) *ReindexService {
- s.requestsPerSecond = &requestsPerSecond
- return s
-}
-
-// Slices specifies the number of slices this task should be divided into. Defaults to 1.
-func (s *ReindexService) Slices(slices int) *ReindexService {
- s.slices = &slices
- return s
-}
-
-// Refresh indicates whether Elasticsearch should refresh the affected indices
-// immediately.
-func (s *ReindexService) Refresh(refresh string) *ReindexService {
- s.refresh = refresh
- return s
-}
-
-// Timeout is the time each individual bulk request should wait for shards
-// that are unavailable.
-func (s *ReindexService) Timeout(timeout string) *ReindexService {
- s.timeout = timeout
- return s
-}
-
-// WaitForCompletion indicates whether Elasticsearch should block until the
-// reindex is complete.
-func (s *ReindexService) WaitForCompletion(waitForCompletion bool) *ReindexService {
- s.waitForCompletion = &waitForCompletion
- return s
-}
-
-// Pretty indicates that the JSON response should be indented and human readable.
-func (s *ReindexService) Pretty(pretty bool) *ReindexService {
- s.pretty = pretty
- return s
-}
-
-// Source specifies the source of the reindexing process.
-func (s *ReindexService) Source(source *ReindexSource) *ReindexService {
- s.source = source
- return s
-}
-
-// SourceIndex specifies the source index of the reindexing process.
-func (s *ReindexService) SourceIndex(index string) *ReindexService {
- if s.source == nil {
- s.source = NewReindexSource()
- }
- s.source = s.source.Index(index)
- return s
-}
-
-// Destination specifies the destination of the reindexing process.
-func (s *ReindexService) Destination(destination *ReindexDestination) *ReindexService {
- s.destination = destination
- return s
-}
-
-// DestinationIndex specifies the destination index of the reindexing process.
-func (s *ReindexService) DestinationIndex(index string) *ReindexService {
- if s.destination == nil {
- s.destination = NewReindexDestination()
- }
- s.destination = s.destination.Index(index)
- return s
-}
-
-// DestinationIndexAndType specifies both the destination index and type
-// of the reindexing process.
-func (s *ReindexService) DestinationIndexAndType(index, typ string) *ReindexService {
- if s.destination == nil {
- s.destination = NewReindexDestination()
- }
- s.destination = s.destination.Index(index)
- s.destination = s.destination.Type(typ)
- return s
-}
-
-// Conflicts indicates what to do when the process detects version conflicts.
-// Possible values are "proceed" and "abort".
-func (s *ReindexService) Conflicts(conflicts string) *ReindexService {
- s.conflicts = conflicts
- return s
-}
-
-// AbortOnVersionConflict aborts the request on version conflicts.
-// It is an alias to setting Conflicts("abort").
-func (s *ReindexService) AbortOnVersionConflict() *ReindexService {
- s.conflicts = "abort"
- return s
-}
-
-// ProceedOnVersionConflict continues the request despite version conflicts.
-// It is an alias to setting Conflicts("proceed").
-func (s *ReindexService) ProceedOnVersionConflict() *ReindexService {
- s.conflicts = "proceed"
- return s
-}
-
-// Size sets an upper limit for the number of processed documents.
-func (s *ReindexService) Size(size int) *ReindexService {
- s.size = &size
- return s
-}
-
-// Script allows for modification of the documents as they are reindexed
-// from source to destination.
-func (s *ReindexService) Script(script *Script) *ReindexService {
- s.script = script
- return s
-}
-
-// Body specifies the body of the request to send to Elasticsearch.
-// It overrides settings specified with other setters, e.g. Query.
-func (s *ReindexService) Body(body interface{}) *ReindexService {
- s.body = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *ReindexService) buildURL() (string, url.Values, error) {
- // Build URL path
- path := "/_reindex"
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.refresh != "" {
- params.Set("refresh", s.refresh)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.requestsPerSecond != nil {
- params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond))
- }
- if s.slices != nil {
- params.Set("slices", fmt.Sprintf("%v", *s.slices))
- }
- if s.waitForActiveShards != "" {
- params.Set("wait_for_active_shards", s.waitForActiveShards)
- }
- if s.waitForCompletion != nil {
- params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *ReindexService) Validate() error {
- var invalid []string
- if s.body != nil {
- return nil
- }
- if s.source == nil {
- invalid = append(invalid, "Source")
- } else {
- if len(s.source.indices) == 0 {
- invalid = append(invalid, "Source.Index")
- }
- }
- if s.destination == nil {
- invalid = append(invalid, "Destination")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// getBody returns the body part of the document request.
-func (s *ReindexService) getBody() (interface{}, error) {
- if s.body != nil {
- return s.body, nil
- }
-
- body := make(map[string]interface{})
-
- if s.conflicts != "" {
- body["conflicts"] = s.conflicts
- }
- if s.size != nil {
- body["size"] = *s.size
- }
- if s.script != nil {
- out, err := s.script.Source()
- if err != nil {
- return nil, err
- }
- body["script"] = out
- }
-
- src, err := s.source.Source()
- if err != nil {
- return nil, err
- }
- body["source"] = src
-
- dst, err := s.destination.Source()
- if err != nil {
- return nil, err
- }
- body["dest"] = dst
-
- return body, nil
-}
-
-// Do executes the operation.
-func (s *ReindexService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- body, err := s.getBody()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(BulkIndexByScrollResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// DoAsync executes the reindexing operation asynchronously by starting a new task.
-// Callers need to use the Task Management API to watch the outcome of the reindexing
-// operation.
-func (s *ReindexService) DoAsync(ctx context.Context) (*StartTaskResult, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // DoAsync only makes sense with WaitForCompletion set to false
- if s.waitForCompletion != nil && *s.waitForCompletion {
- return nil, fmt.Errorf("cannot start a task with WaitForCompletion set to true")
- }
- f := false
- s.waitForCompletion = &f
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- body, err := s.getBody()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(StartTaskResult)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// -- Source of Reindex --
-
-// ReindexSource specifies the source of a Reindex process.
-type ReindexSource struct {
- searchType string // default in ES is "query_then_fetch"
- indices []string
- types []string
- routing *string
- preference *string
- requestCache *bool
- scroll string
- query Query
- sorts []SortInfo
- sorters []Sorter
- searchSource *SearchSource
- remoteInfo *ReindexRemoteInfo
-}
-
-// NewReindexSource creates a new ReindexSource.
-func NewReindexSource() *ReindexSource {
- return &ReindexSource{}
-}
-
-// SearchType is the search operation type. Possible values are
-// "query_then_fetch" and "dfs_query_then_fetch".
-func (r *ReindexSource) SearchType(searchType string) *ReindexSource {
- r.searchType = searchType
- return r
-}
-
-func (r *ReindexSource) SearchTypeDfsQueryThenFetch() *ReindexSource {
- return r.SearchType("dfs_query_then_fetch")
-}
-
-func (r *ReindexSource) SearchTypeQueryThenFetch() *ReindexSource {
- return r.SearchType("query_then_fetch")
-}
-
-func (r *ReindexSource) Index(indices ...string) *ReindexSource {
- r.indices = append(r.indices, indices...)
- return r
-}
-
-func (r *ReindexSource) Type(types ...string) *ReindexSource {
- r.types = append(r.types, types...)
- return r
-}
-
-func (r *ReindexSource) Preference(preference string) *ReindexSource {
- r.preference = &preference
- return r
-}
-
-func (r *ReindexSource) RequestCache(requestCache bool) *ReindexSource {
- r.requestCache = &requestCache
- return r
-}
-
-func (r *ReindexSource) Scroll(scroll string) *ReindexSource {
- r.scroll = scroll
- return r
-}
-
-func (r *ReindexSource) Query(query Query) *ReindexSource {
- r.query = query
- return r
-}
-
-// Sort adds a sort order.
-func (s *ReindexSource) Sort(field string, ascending bool) *ReindexSource {
- s.sorts = append(s.sorts, SortInfo{Field: field, Ascending: ascending})
- return s
-}
-
-// SortWithInfo adds a sort order.
-func (s *ReindexSource) SortWithInfo(info SortInfo) *ReindexSource {
- s.sorts = append(s.sorts, info)
- return s
-}
-
-// SortBy adds a sort order.
-func (s *ReindexSource) SortBy(sorter ...Sorter) *ReindexSource {
- s.sorters = append(s.sorters, sorter...)
- return s
-}
-
-// RemoteInfo sets up reindexing from a remote cluster.
-func (s *ReindexSource) RemoteInfo(ri *ReindexRemoteInfo) *ReindexSource {
- s.remoteInfo = ri
- return s
-}
-
-// Source returns a serializable JSON request for the request.
-func (r *ReindexSource) Source() (interface{}, error) {
- source := make(map[string]interface{})
-
- if r.query != nil {
- src, err := r.query.Source()
- if err != nil {
- return nil, err
- }
- source["query"] = src
- } else if r.searchSource != nil {
- src, err := r.searchSource.Source()
- if err != nil {
- return nil, err
- }
- source["source"] = src
- }
-
- if r.searchType != "" {
- source["search_type"] = r.searchType
- }
-
- switch len(r.indices) {
- case 0:
- case 1:
- source["index"] = r.indices[0]
- default:
- source["index"] = r.indices
- }
-
- switch len(r.types) {
- case 0:
- case 1:
- source["type"] = r.types[0]
- default:
- source["type"] = r.types
- }
-
- if r.preference != nil && *r.preference != "" {
- source["preference"] = *r.preference
- }
-
- if r.requestCache != nil {
- source["request_cache"] = fmt.Sprintf("%v", *r.requestCache)
- }
-
- if r.scroll != "" {
- source["scroll"] = r.scroll
- }
-
- if r.remoteInfo != nil {
- src, err := r.remoteInfo.Source()
- if err != nil {
- return nil, err
- }
- source["remote"] = src
- }
-
- if len(r.sorters) > 0 {
- var sortarr []interface{}
- for _, sorter := range r.sorters {
- src, err := sorter.Source()
- if err != nil {
- return nil, err
- }
- sortarr = append(sortarr, src)
- }
- source["sort"] = sortarr
- } else if len(r.sorts) > 0 {
- var sortarr []interface{}
- for _, sort := range r.sorts {
- src, err := sort.Source()
- if err != nil {
- return nil, err
- }
- sortarr = append(sortarr, src)
- }
- source["sort"] = sortarr
- }
-
- return source, nil
-}
-
-// ReindexRemoteInfo contains information for reindexing from a remote cluster.
-type ReindexRemoteInfo struct {
- host string
- username string
- password string
- socketTimeout string // e.g. "1m" or "30s"
- connectTimeout string // e.g. "1m" or "30s"
-}
-
-// NewReindexRemoteInfo creates a new ReindexRemoteInfo.
-func NewReindexRemoteInfo() *ReindexRemoteInfo {
- return &ReindexRemoteInfo{}
-}
-
-// Host sets the host information of the remote cluster.
-// It must be of the form "http(s)://<hostname>:<port>"
-func (ri *ReindexRemoteInfo) Host(host string) *ReindexRemoteInfo {
- ri.host = host
- return ri
-}
-
-// Username sets the username to authenticate with the remote cluster.
-func (ri *ReindexRemoteInfo) Username(username string) *ReindexRemoteInfo {
- ri.username = username
- return ri
-}
-
-// Password sets the password to authenticate with the remote cluster.
-func (ri *ReindexRemoteInfo) Password(password string) *ReindexRemoteInfo {
- ri.password = password
- return ri
-}
-
-// SocketTimeout sets the socket timeout to connect with the remote cluster.
-// Use ES-compatible values such as "30s" or "1m".
-func (ri *ReindexRemoteInfo) SocketTimeout(timeout string) *ReindexRemoteInfo {
- ri.socketTimeout = timeout
- return ri
-}
-
-// ConnectTimeout sets the connection timeout to connect with the remote cluster.
-// Use ES-compatible values such as "30s" or "1m".
-func (ri *ReindexRemoteInfo) ConnectTimeout(timeout string) *ReindexRemoteInfo {
- ri.connectTimeout = timeout
- return ri
-}
-
-// Source returns the serializable JSON data for the request.
-func (ri *ReindexRemoteInfo) Source() (interface{}, error) {
- res := make(map[string]interface{})
- res["host"] = ri.host
- if len(ri.username) > 0 {
- res["username"] = ri.username
- }
- if len(ri.password) > 0 {
- res["password"] = ri.password
- }
- if len(ri.socketTimeout) > 0 {
- res["socket_timeout"] = ri.socketTimeout
- }
- if len(ri.connectTimeout) > 0 {
- res["connect_timeout"] = ri.connectTimeout
- }
- return res, nil
-}
-
-// -- Destination of Reindex --
-
-// ReindexDestination is the destination of a Reindex API call.
-// It is basically the meta data of a BulkIndexRequest.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-reindex.html
-// for details.
-type ReindexDestination struct {
- index string
- typ string
- routing string
- parent string
- opType string
- version int64 // default is MATCH_ANY
- versionType string // default is "internal"
-}
-
-// NewReindexDestination returns a new ReindexDestination.
-func NewReindexDestination() *ReindexDestination {
- return &ReindexDestination{}
-}
-
-// Index specifies the name of the Elasticsearch index to use as the destination
-// of a reindexing process.
-func (r *ReindexDestination) Index(index string) *ReindexDestination {
- r.index = index
- return r
-}
-
-// Type specifies the Elasticsearch type to use for reindexing.
-func (r *ReindexDestination) Type(typ string) *ReindexDestination {
- r.typ = typ
- return r
-}
-
-// Routing specifies a routing value for the reindexing request.
-// It can be "keep", "discard", or start with "=". The latter specifies
-// the routing on the bulk request.
-func (r *ReindexDestination) Routing(routing string) *ReindexDestination {
- r.routing = routing
- return r
-}
-
-// Keep sets the routing on the bulk request sent for each match to the routing
-// of the match (the default).
-func (r *ReindexDestination) Keep() *ReindexDestination {
- r.routing = "keep"
- return r
-}
-
-// Discard sets the routing on the bulk request sent for each match to null.
-func (r *ReindexDestination) Discard() *ReindexDestination {
- r.routing = "discard"
- return r
-}
-
-// Parent specifies the identifier of the parent document (if available).
-func (r *ReindexDestination) Parent(parent string) *ReindexDestination {
- r.parent = parent
- return r
-}
-
-// OpType specifies if this request should follow create-only or upsert
-// behavior. This follows the OpType of the standard document index API.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html#operation-type
-// for details.
-func (r *ReindexDestination) OpType(opType string) *ReindexDestination {
- r.opType = opType
- return r
-}
-
-// Version indicates the version of the document as part of an optimistic
-// concurrency model.
-func (r *ReindexDestination) Version(version int64) *ReindexDestination {
- r.version = version
- return r
-}
-
-// VersionType specifies how versions are created.
-func (r *ReindexDestination) VersionType(versionType string) *ReindexDestination {
- r.versionType = versionType
- return r
-}
-
-// Source returns a serializable JSON request for the request.
-func (r *ReindexDestination) Source() (interface{}, error) {
- source := make(map[string]interface{})
- if r.index != "" {
- source["index"] = r.index
- }
- if r.typ != "" {
- source["type"] = r.typ
- }
- if r.routing != "" {
- source["routing"] = r.routing
- }
- if r.opType != "" {
- source["op_type"] = r.opType
- }
- if r.parent != "" {
- source["parent"] = r.parent
- }
- if r.version > 0 {
- source["version"] = r.version
- }
- if r.versionType != "" {
- source["version_type"] = r.versionType
- }
- return source, nil
-}
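To make the fluent API above concrete, a hedged usage sketch; the index names are placeholders and the client options are assumptions:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Synchronous copy from "twitter" into "new_twitter", continuing past
	// version conflicts and refreshing the destination when done.
	res, err := client.Reindex().
		SourceIndex("twitter").
		DestinationIndex("new_twitter").
		ProceedOnVersionConflict().
		Refresh("true").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("total=%d created=%d updated=%d\n", res.Total, res.Created, res.Updated)

	// Asynchronous variant: starts a server-side task and returns its ID,
	// which can be polled via the Tasks API.
	task, err := client.Reindex().
		SourceIndex("twitter").
		DestinationIndex("new_twitter").
		DoAsync(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("reindex task:", task.TaskId)
}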
diff --git a/vendor/github.com/olivere/elastic/reindex_test.go b/vendor/github.com/olivere/elastic/reindex_test.go
deleted file mode 100644
index fadf4bfc7..000000000
--- a/vendor/github.com/olivere/elastic/reindex_test.go
+++ /dev/null
@@ -1,401 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "testing"
-)
-
-func TestReindexSourceWithBodyMap(t *testing.T) {
- client := setupTestClient(t)
- out, err := client.Reindex().Body(map[string]interface{}{
- "source": map[string]interface{}{
- "index": "twitter",
- },
- "dest": map[string]interface{}{
- "index": "new_twitter",
- },
- }).getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter"}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindexSourceWithBodyString(t *testing.T) {
- client := setupTestClient(t)
- got, err := client.Reindex().Body(`{"source":{"index":"twitter"},"dest":{"index":"new_twitter"}}`).getBody()
- if err != nil {
- t.Fatal(err)
- }
- want := `{"source":{"index":"twitter"},"dest":{"index":"new_twitter"}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindexSourceWithSourceIndexAndDestinationIndex(t *testing.T) {
- client := setupTestClient(t)
- out, err := client.Reindex().SourceIndex("twitter").DestinationIndex("new_twitter").getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter"}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindexSourceWithSourceAndDestinationAndVersionType(t *testing.T) {
- client := setupTestClient(t)
- src := NewReindexSource().Index("twitter")
- dst := NewReindexDestination().Index("new_twitter").VersionType("external")
- out, err := client.Reindex().Source(src).Destination(dst).getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"dest":{"index":"new_twitter","version_type":"external"},"source":{"index":"twitter"}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindexSourceWithSourceAndRemoteAndDestination(t *testing.T) {
- client := setupTestClient(t)
- src := NewReindexSource().Index("twitter").RemoteInfo(
- NewReindexRemoteInfo().Host("http://otherhost:9200").
- Username("alice").
- Password("secret").
- ConnectTimeout("10s").
- SocketTimeout("1m"),
- )
- dst := NewReindexDestination().Index("new_twitter")
- out, err := client.Reindex().Source(src).Destination(dst).getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter","remote":{"connect_timeout":"10s","host":"http://otherhost:9200","password":"secret","socket_timeout":"1m","username":"alice"}}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindexSourceWithSourceAndDestinationAndOpType(t *testing.T) {
- client := setupTestClient(t)
- src := NewReindexSource().Index("twitter")
- dst := NewReindexDestination().Index("new_twitter").OpType("create")
- out, err := client.Reindex().Source(src).Destination(dst).getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"dest":{"index":"new_twitter","op_type":"create"},"source":{"index":"twitter"}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindexSourceWithConflictsProceed(t *testing.T) {
- client := setupTestClient(t)
- src := NewReindexSource().Index("twitter")
- dst := NewReindexDestination().Index("new_twitter").OpType("create")
- out, err := client.Reindex().Conflicts("proceed").Source(src).Destination(dst).getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"conflicts":"proceed","dest":{"index":"new_twitter","op_type":"create"},"source":{"index":"twitter"}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindexSourceWithProceedOnVersionConflict(t *testing.T) {
- client := setupTestClient(t)
- src := NewReindexSource().Index("twitter")
- dst := NewReindexDestination().Index("new_twitter").OpType("create")
- out, err := client.Reindex().ProceedOnVersionConflict().Source(src).Destination(dst).getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"conflicts":"proceed","dest":{"index":"new_twitter","op_type":"create"},"source":{"index":"twitter"}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindexSourceWithQuery(t *testing.T) {
- client := setupTestClient(t)
- src := NewReindexSource().Index("twitter").Type("doc").Query(NewTermQuery("user", "olivere"))
- dst := NewReindexDestination().Index("new_twitter")
- out, err := client.Reindex().Source(src).Destination(dst).getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter","query":{"term":{"user":"olivere"}},"type":"doc"}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindexSourceWithMultipleSourceIndicesAndTypes(t *testing.T) {
- client := setupTestClient(t)
- src := NewReindexSource().Index("twitter", "blog").Type("doc", "post")
- dst := NewReindexDestination().Index("all_together")
- out, err := client.Reindex().Source(src).Destination(dst).getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"dest":{"index":"all_together"},"source":{"index":["twitter","blog"],"type":["doc","post"]}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindexSourceWithSourceAndSize(t *testing.T) {
- client := setupTestClient(t)
- src := NewReindexSource().Index("twitter").Sort("date", false)
- dst := NewReindexDestination().Index("new_twitter")
- out, err := client.Reindex().Size(10000).Source(src).Destination(dst).getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"dest":{"index":"new_twitter"},"size":10000,"source":{"index":"twitter","sort":[{"date":{"order":"desc"}}]}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindexSourceWithScript(t *testing.T) {
- client := setupTestClient(t)
- src := NewReindexSource().Index("twitter")
- dst := NewReindexDestination().Index("new_twitter").VersionType("external")
- scr := NewScriptInline("if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}")
- out, err := client.Reindex().Source(src).Destination(dst).Script(scr).getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"dest":{"index":"new_twitter","version_type":"external"},"script":{"source":"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}"},"source":{"index":"twitter"}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindexSourceWithRouting(t *testing.T) {
- client := setupTestClient(t)
- src := NewReindexSource().Index("source").Query(NewMatchQuery("company", "cat"))
- dst := NewReindexDestination().Index("dest").Routing("=cat")
- out, err := client.Reindex().Source(src).Destination(dst).getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"dest":{"index":"dest","routing":"=cat"},"source":{"index":"source","query":{"match":{"company":{"query":"cat"}}}}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestReindex(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
- if esversion < "2.3.0" {
- t.Skipf("Elasticsearch %v does not support Reindex API yet", esversion)
- }
-
- sourceCount, err := client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if sourceCount <= 0 {
- t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
- }
-
- targetCount, err := client.Count(testIndexName2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if targetCount != 0 {
- t.Fatalf("expected %d documents; got: %d", 0, targetCount)
- }
-
- // Simple copying
- src := NewReindexSource().Index(testIndexName)
- dst := NewReindexDestination().Index(testIndexName2)
- res, err := client.Reindex().Source(src).Destination(dst).Refresh("true").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected result != nil")
- }
- if res.Total != sourceCount {
- t.Errorf("expected %d, got %d", sourceCount, res.Total)
- }
- if res.Updated != 0 {
- t.Errorf("expected %d, got %d", 0, res.Updated)
- }
- if res.Created != sourceCount {
- t.Errorf("expected %d, got %d", sourceCount, res.Created)
- }
-
- targetCount, err = client.Count(testIndexName2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if targetCount != sourceCount {
- t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount)
- }
-}
-
-func TestReindexAsync(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
- if esversion < "2.3.0" {
- t.Skipf("Elasticsearch %v does not support Reindex API yet", esversion)
- }
-
- sourceCount, err := client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if sourceCount <= 0 {
- t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
- }
-
- targetCount, err := client.Count(testIndexName2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if targetCount != 0 {
- t.Fatalf("expected %d documents; got: %d", 0, targetCount)
- }
-
- // Simple copying
- src := NewReindexSource().Index(testIndexName)
- dst := NewReindexDestination().Index(testIndexName2)
- res, err := client.Reindex().Source(src).Destination(dst).DoAsync(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected result != nil")
- }
- if res.TaskId == "" {
- t.Errorf("expected a task id, got %+v", res)
- }
-
- tasksGetTask := client.TasksGetTask()
- taskStatus, err := tasksGetTask.TaskId(res.TaskId).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if taskStatus == nil {
- t.Fatal("expected task status result != nil")
- }
-}
-
-func TestReindexWithWaitForCompletionTrueCannotBeStarted(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t)
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
- if esversion < "2.3.0" {
- t.Skipf("Elasticsearch %v does not support Reindex API yet", esversion)
- }
-
- sourceCount, err := client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if sourceCount <= 0 {
- t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
- }
-
- targetCount, err := client.Count(testIndexName2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if targetCount != 0 {
- t.Fatalf("expected %d documents; got: %d", 0, targetCount)
- }
-
- // DoAsync should fail when WaitForCompletion is true
- src := NewReindexSource().Index(testIndexName)
- dst := NewReindexDestination().Index(testIndexName2)
- _, err = client.Reindex().Source(src).Destination(dst).WaitForCompletion(true).DoAsync(context.TODO())
- if err == nil {
- t.Fatal("error should have been returned")
- }
-}
diff --git a/vendor/github.com/olivere/elastic/request.go b/vendor/github.com/olivere/elastic/request.go
deleted file mode 100644
index 87d191965..000000000
--- a/vendor/github.com/olivere/elastic/request.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "bytes"
- "encoding/json"
- "io"
- "io/ioutil"
- "net/http"
- "runtime"
- "strings"
-)
-
-// Elasticsearch-specific HTTP request
-type Request http.Request
-
-// NewRequest creates a new Request. It wraps http.Request and adds features such as encoding the body.
-func NewRequest(method, url string) (*Request, error) {
- req, err := http.NewRequest(method, url, nil)
- if err != nil {
- return nil, err
- }
- req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")")
- req.Header.Add("Accept", "application/json")
- req.Header.Set("Content-Type", "application/json")
- return (*Request)(req), nil
-}
-
-// SetBasicAuth wraps http.Request's SetBasicAuth.
-func (r *Request) SetBasicAuth(username, password string) {
- ((*http.Request)(r)).SetBasicAuth(username, password)
-}
-
-// SetBody encodes the body in the request.
-func (r *Request) SetBody(body interface{}) error {
- switch b := body.(type) {
- case string:
- return r.setBodyString(b)
- default:
- return r.setBodyJson(body)
- }
-}
-
-// setBodyJson encodes the body as a struct to be marshaled via json.Marshal.
-func (r *Request) setBodyJson(data interface{}) error {
- body, err := json.Marshal(data)
- if err != nil {
- return err
- }
- r.Header.Set("Content-Type", "application/json")
- r.setBodyReader(bytes.NewReader(body))
- return nil
-}
-
-// setBodyString encodes the body as a string.
-func (r *Request) setBodyString(body string) error {
- return r.setBodyReader(strings.NewReader(body))
-}
-
-// setBodyReader writes the body from an io.Reader.
-func (r *Request) setBodyReader(body io.Reader) error {
- rc, ok := body.(io.ReadCloser)
- if !ok && body != nil {
- rc = ioutil.NopCloser(body)
- }
- r.Body = rc
- if body != nil {
- switch v := body.(type) {
- case *strings.Reader:
- r.ContentLength = int64(v.Len())
- case *bytes.Buffer:
- r.ContentLength = int64(v.Len())
- }
- }
- return nil
-}
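Request is mostly internal plumbing for the client, but a small sketch shows how the pieces above compose (credentials and URL are made up; applications would normally go through Client instead):

package main

import (
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// NewRequest sets the JSON Accept/Content-Type headers and the User-Agent.
	req, err := elastic.NewRequest("GET", "http://127.0.0.1:9200/twitter/_search")
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("alice", "secret")

	// SetBody passes strings through as-is and marshals anything else as JSON.
	if err := req.SetBody(map[string]interface{}{
		"query": map[string]interface{}{"match_all": map[string]interface{}{}},
	}); err != nil {
		log.Fatal(err)
	}
	fmt.Println("content type:", req.Header.Get("Content-Type"))
}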
diff --git a/vendor/github.com/olivere/elastic/request_test.go b/vendor/github.com/olivere/elastic/request_test.go
deleted file mode 100644
index 04fbecbab..000000000
--- a/vendor/github.com/olivere/elastic/request_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-var testReq *Request // used as a temporary variable to avoid compiler optimizations in tests/benchmarks
-
-func TestRequestSetContentType(t *testing.T) {
- req, err := NewRequest("GET", "/")
- if err != nil {
- t.Fatal(err)
- }
- if want, have := "application/json", req.Header.Get("Content-Type"); want != have {
- t.Fatalf("want %q, have %q", want, have)
- }
- req.Header.Set("Content-Type", "application/x-ndjson")
- if want, have := "application/x-ndjson", req.Header.Get("Content-Type"); want != have {
- t.Fatalf("want %q, have %q", want, have)
- }
-}
-
-func BenchmarkRequestSetBodyString(b *testing.B) {
- req, err := NewRequest("GET", "/")
- if err != nil {
- b.Fatal(err)
- }
- for i := 0; i < b.N; i++ {
- body := `{"query":{"match_all":{}}}`
- err = req.SetBody(body)
- if err != nil {
- b.Fatal(err)
- }
- }
- testReq = req
-}
-
-func BenchmarkRequestSetBodyBytes(b *testing.B) {
- req, err := NewRequest("GET", "/")
- if err != nil {
- b.Fatal(err)
- }
- for i := 0; i < b.N; i++ {
- body := []byte(`{"query":{"match_all":{}}}`)
- err = req.SetBody(body)
- if err != nil {
- b.Fatal(err)
- }
- }
- testReq = req
-}
-
-func BenchmarkRequestSetBodyMap(b *testing.B) {
- req, err := NewRequest("GET", "/")
- if err != nil {
- b.Fatal(err)
- }
- for i := 0; i < b.N; i++ {
- body := map[string]interface{}{
- "query": map[string]interface{}{
- "match_all": map[string]interface{}{},
- },
- }
- err = req.SetBody(body)
- if err != nil {
- b.Fatal(err)
- }
- }
- testReq = req
-}
diff --git a/vendor/github.com/olivere/elastic/rescore.go b/vendor/github.com/olivere/elastic/rescore.go
deleted file mode 100644
index 9b7eaee1d..000000000
--- a/vendor/github.com/olivere/elastic/rescore.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-type Rescore struct {
- rescorer Rescorer
- windowSize *int
- defaultRescoreWindowSize *int
-}
-
-func NewRescore() *Rescore {
- return &Rescore{}
-}
-
-func (r *Rescore) WindowSize(windowSize int) *Rescore {
- r.windowSize = &windowSize
- return r
-}
-
-func (r *Rescore) IsEmpty() bool {
- return r.rescorer == nil
-}
-
-func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore {
- r.rescorer = rescorer
- return r
-}
-
-func (r *Rescore) Source() (interface{}, error) {
- source := make(map[string]interface{})
- if r.windowSize != nil {
- source["window_size"] = *r.windowSize
- } else if r.defaultRescoreWindowSize != nil {
- source["window_size"] = *r.defaultRescoreWindowSize
- }
- rescorerSrc, err := r.rescorer.Source()
- if err != nil {
- return nil, err
- }
- source[r.rescorer.Name()] = rescorerSrc
- return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/rescorer.go b/vendor/github.com/olivere/elastic/rescorer.go
deleted file mode 100644
index ccd4bb854..000000000
--- a/vendor/github.com/olivere/elastic/rescorer.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-type Rescorer interface {
- Name() string
- Source() (interface{}, error)
-}
-
-// -- Query Rescorer --
-
-type QueryRescorer struct {
- query Query
- rescoreQueryWeight *float64
- queryWeight *float64
- scoreMode string
-}
-
-func NewQueryRescorer(query Query) *QueryRescorer {
- return &QueryRescorer{
- query: query,
- }
-}
-
-func (r *QueryRescorer) Name() string {
- return "query"
-}
-
-func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer {
- r.rescoreQueryWeight = &rescoreQueryWeight
- return r
-}
-
-func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer {
- r.queryWeight = &queryWeight
- return r
-}
-
-func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer {
- r.scoreMode = scoreMode
- return r
-}
-
-func (r *QueryRescorer) Source() (interface{}, error) {
- rescoreQuery, err := r.query.Source()
- if err != nil {
- return nil, err
- }
-
- source := make(map[string]interface{})
- source["rescore_query"] = rescoreQuery
- if r.queryWeight != nil {
- source["query_weight"] = *r.queryWeight
- }
- if r.rescoreQueryWeight != nil {
- source["rescore_query_weight"] = *r.rescoreQueryWeight
- }
- if r.scoreMode != "" {
- source["score_mode"] = r.scoreMode
- }
- return source, nil
-}
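The two types above compose as follows. A self-contained sketch that only serializes the rescore body (the query and weights are examples):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	rescore := elastic.NewRescore().
		WindowSize(50).
		Rescorer(
			elastic.NewQueryRescorer(elastic.NewTermQuery("user", "olivere")).
				QueryWeight(0.7).
				RescoreQueryWeight(1.2),
		)

	// Source produces the JSON that ends up under "rescore" in a search request.
	src, err := rescore.Source()
	if err != nil {
		log.Fatal(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}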
diff --git a/vendor/github.com/olivere/elastic/response.go b/vendor/github.com/olivere/elastic/response.go
deleted file mode 100644
index 4fcdc32d6..000000000
--- a/vendor/github.com/olivere/elastic/response.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "io/ioutil"
- "net/http"
-)
-
-// Response represents a response from Elasticsearch.
-type Response struct {
- // StatusCode is the HTTP status code, e.g. 200.
- StatusCode int
- // Header is the HTTP header from the HTTP response.
- // Keys in the map are canonicalized (see http.CanonicalHeaderKey).
- Header http.Header
- // Body is the deserialized response body.
- Body json.RawMessage
-}
-
-// newResponse creates a new response from the HTTP response.
-func (c *Client) newResponse(res *http.Response) (*Response, error) {
- r := &Response{
- StatusCode: res.StatusCode,
- Header: res.Header,
- }
- if res.Body != nil {
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return nil, err
- }
- // HEAD requests have a non-nil Body but no content to decode
- if len(slurp) > 0 {
- r.Body = json.RawMessage(slurp)
- }
- }
- return r, nil
-}
diff --git a/vendor/github.com/olivere/elastic/response_test.go b/vendor/github.com/olivere/elastic/response_test.go
deleted file mode 100644
index e62773403..000000000
--- a/vendor/github.com/olivere/elastic/response_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "net/http"
- "testing"
-)
-
-func BenchmarkResponse(b *testing.B) {
- c := &Client{
- decoder: &DefaultDecoder{},
- }
-
- var resp *Response
- for n := 0; n < b.N; n++ {
- iteration := fmt.Sprint(n)
- body := fmt.Sprintf(`{"n":%d}`, n)
- res := &http.Response{
- Header: http.Header{
- "X-Iteration": []string{iteration},
- },
- Body: ioutil.NopCloser(bytes.NewBufferString(body)),
- StatusCode: http.StatusOK,
- }
- var err error
- resp, err = c.newResponse(res)
- if err != nil {
- b.Fatal(err)
- }
- /*
- if want, have := body, string(resp.Body); want != have {
- b.Fatalf("want %q, have %q", want, have)
- }
- //*/
- /*
- if want, have := iteration, resp.Header.Get("X-Iteration"); want != have {
- b.Fatalf("want %q, have %q", want, have)
- }
- //*/
- }
- _ = resp
-}
diff --git a/vendor/github.com/olivere/elastic/retrier.go b/vendor/github.com/olivere/elastic/retrier.go
deleted file mode 100644
index 46d3adfcb..000000000
--- a/vendor/github.com/olivere/elastic/retrier.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "net/http"
- "time"
-)
-
-// RetrierFunc specifies the signature of a Retry function.
-type RetrierFunc func(context.Context, int, *http.Request, *http.Response, error) (time.Duration, bool, error)
-
-// Retrier decides whether to retry a failed HTTP request with Elasticsearch.
-type Retrier interface {
- // Retry is called when a request has failed. It decides whether to retry
- // the call, how long to wait for the next call, or whether to return an
- // error (which will be returned to the service that started the HTTP
- // request in the first place).
- //
- // Callers may also use this to inspect the HTTP request/response and
- // the error that happened. Additional data can be passed through via
- // the context.
- Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error)
-}
-
-// -- StopRetrier --
-
-// StopRetrier is an implementation that does no retries.
-type StopRetrier struct {
-}
-
-// NewStopRetrier returns a retrier that does no retries.
-func NewStopRetrier() *StopRetrier {
- return &StopRetrier{}
-}
-
-// Retry does not retry.
-func (r *StopRetrier) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) {
- return 0, false, nil
-}
-
-// -- BackoffRetrier --
-
-// BackoffRetrier is an implementation that uses a backoff strategy to decide
-// whether and how long to wait before retrying.
-type BackoffRetrier struct {
- backoff Backoff
-}
-
-// NewBackoffRetrier returns a retrier that uses the given backoff strategy.
-func NewBackoffRetrier(backoff Backoff) *BackoffRetrier {
- return &BackoffRetrier{backoff: backoff}
-}
-
-// Retry calls into the backoff strategy and returns its wait interval.
-func (r *BackoffRetrier) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) {
- wait, goahead := r.backoff.Next(retry)
- return wait, goahead, nil
-}
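
The Retrier interface above is what the SetRetrier client option (exercised in the tests that follow) accepts. A minimal sketch of a custom implementation; the 429/503 policy and the reachable cluster at the default URL are assumptions, not something the library prescribes:

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"github.com/olivere/elastic"
)

// ThrottleRetrier is a sketch of a custom Retrier: it backs off on transport
// errors and on 429/503 responses, and gives up on anything else.
type ThrottleRetrier struct {
	backoff elastic.Backoff
}

func NewThrottleRetrier() *ThrottleRetrier {
	return &ThrottleRetrier{
		backoff: elastic.NewExponentialBackoff(10*time.Millisecond, 8*time.Second),
	}
}

func (r *ThrottleRetrier) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) {
	if ctx.Err() != nil {
		return 0, false, ctx.Err() // caller canceled; stop retrying
	}
	retryable := err != nil ||
		(resp != nil && (resp.StatusCode == http.StatusTooManyRequests ||
			resp.StatusCode == http.StatusServiceUnavailable))
	if !retryable {
		return 0, false, nil
	}
	wait, goahead := r.backoff.Next(retry)
	return wait, goahead, nil
}

func main() {
	// Wire the retrier into a client, as the tests below do with SetRetrier.
	// Assumes a reachable cluster at the library's default URL.
	client, err := elastic.NewClient(elastic.SetRetrier(NewThrottleRetrier()))
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
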
diff --git a/vendor/github.com/olivere/elastic/retrier_test.go b/vendor/github.com/olivere/elastic/retrier_test.go
deleted file mode 100644
index c1c5ff524..000000000
--- a/vendor/github.com/olivere/elastic/retrier_test.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "errors"
- "net/http"
- "sync/atomic"
- "testing"
- "time"
-)
-
-type testRetrier struct {
- Retrier
- N int64
- Err error
-}
-
-func (r *testRetrier) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) {
- atomic.AddInt64(&r.N, 1)
- if r.Err != nil {
- return 0, false, r.Err
- }
- return r.Retrier.Retry(ctx, retry, req, resp, err)
-}
-
-func TestStopRetrier(t *testing.T) {
- r := NewStopRetrier()
- wait, ok, err := r.Retry(context.TODO(), 1, nil, nil, nil)
- if want, got := 0*time.Second, wait; want != got {
- t.Fatalf("expected %v, got %v", want, got)
- }
- if want, got := false, ok; want != got {
- t.Fatalf("expected %v, got %v", want, got)
- }
- if err != nil {
- t.Fatalf("expected nil, got %v", err)
- }
-}
-
-func TestRetrier(t *testing.T) {
- var numFailedReqs int
- fail := func(r *http.Request) (*http.Response, error) {
- numFailedReqs += 1
- //return &http.Response{Request: r, StatusCode: 400}, nil
- return nil, errors.New("request failed")
- }
-
- tr := &failingTransport{path: "/fail", fail: fail}
- httpClient := &http.Client{Transport: tr}
-
- retrier := &testRetrier{
- Retrier: NewBackoffRetrier(NewSimpleBackoff(100, 100, 100, 100, 100)),
- }
-
- client, err := NewClient(
- SetHttpClient(httpClient),
- SetMaxRetries(5),
- SetHealthcheck(false),
- SetRetrier(retrier))
- if err != nil {
- t.Fatal(err)
- }
-
- res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/fail",
- })
- if err == nil {
- t.Fatal("expected error")
- }
- if res != nil {
- t.Fatal("expected no response")
- }
- // Connection should be marked as dead after it failed
- if numFailedReqs != 5 {
- t.Errorf("expected %d failed requests; got: %d", 5, numFailedReqs)
- }
- if retrier.N != 5 {
- t.Errorf("expected %d Retrier calls; got: %d", 5, retrier.N)
- }
-}
-
-func TestRetrierWithError(t *testing.T) {
- var numFailedReqs int
- fail := func(r *http.Request) (*http.Response, error) {
- numFailedReqs += 1
- //return &http.Response{Request: r, StatusCode: 400}, nil
- return nil, errors.New("request failed")
- }
-
- tr := &failingTransport{path: "/fail", fail: fail}
- httpClient := &http.Client{Transport: tr}
-
- kaboom := errors.New("kaboom")
- retrier := &testRetrier{
- Err: kaboom,
- Retrier: NewBackoffRetrier(NewSimpleBackoff(100, 100, 100, 100, 100)),
- }
-
- client, err := NewClient(
- SetHttpClient(httpClient),
- SetMaxRetries(5),
- SetHealthcheck(false),
- SetRetrier(retrier))
- if err != nil {
- t.Fatal(err)
- }
-
- res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/fail",
- })
- if err != kaboom {
- t.Fatalf("expected %v, got %v", kaboom, err)
- }
- if res != nil {
- t.Fatal("expected no response")
- }
- if numFailedReqs != 1 {
- t.Errorf("expected %d failed requests; got: %d", 1, numFailedReqs)
- }
- if retrier.N != 1 {
- t.Errorf("expected %d Retrier calls; got: %d", 1, retrier.N)
- }
-}
-
-func TestRetrierOnPerformRequest(t *testing.T) {
- var numFailedReqs int
- fail := func(r *http.Request) (*http.Response, error) {
- numFailedReqs += 1
- //return &http.Response{Request: r, StatusCode: 400}, nil
- return nil, errors.New("request failed")
- }
-
- tr := &failingTransport{path: "/fail", fail: fail}
- httpClient := &http.Client{Transport: tr}
-
- defaultRetrier := &testRetrier{
- Retrier: NewStopRetrier(),
- }
- requestRetrier := &testRetrier{
- Retrier: NewStopRetrier(),
- }
-
- client, err := NewClient(
- SetHttpClient(httpClient),
- SetHealthcheck(false),
- SetRetrier(defaultRetrier))
- if err != nil {
- t.Fatal(err)
- }
-
- res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
- Method: "GET",
- Path: "/fail",
- Retrier: requestRetrier,
- })
- if err == nil {
- t.Fatal("expected error")
- }
- if res != nil {
- t.Fatal("expected no response")
- }
- if want, have := int64(0), defaultRetrier.N; want != have {
- t.Errorf("defaultRetrier: expected %d calls; got: %d", want, have)
- }
- if want, have := int64(1), requestRetrier.N; want != have {
- t.Errorf("requestRetrier: expected %d calls; got: %d", want, have)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/retry.go b/vendor/github.com/olivere/elastic/retry.go
deleted file mode 100644
index 3571a3b7a..000000000
--- a/vendor/github.com/olivere/elastic/retry.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-// This file is based on code (c) 2014 Cenk Altı and governed by the MIT license.
-// See https://github.com/cenkalti/backoff for original source.
-
-package elastic
-
-import "time"
-
-// An Operation is executed by Retry() or RetryNotify().
-// The operation will be retried using a backoff policy if it returns an error.
-type Operation func() error
-
-// Notify is a notify-on-error function. It receives error returned
-// from an operation.
-//
-// Notice that if the backoff policy says to stop retrying,
-// the notify function isn't called.
-type Notify func(error)
-
-// Retry runs the operation o until it does not return an error or until the
-// backoff b stops. o is guaranteed to be run at least once.
-// It is the caller's responsibility to reset b after Retry returns.
-//
-// Retry sleeps the goroutine for the duration returned by BackOff after a
-// failed operation returns.
-func Retry(o Operation, b Backoff) error { return RetryNotify(o, b, nil) }
-
-// RetryNotify calls the notify function with the error after each failed
-// attempt, then sleeps for the backoff duration before retrying.
-func RetryNotify(operation Operation, b Backoff, notify Notify) error {
- var err error
- var wait time.Duration
- var retry bool
- var n int
-
- for {
- if err = operation(); err == nil {
- return nil
- }
-
- n++
- wait, retry = b.Next(n)
- if !retry {
- return err
- }
-
- if notify != nil {
- notify(err)
- }
-
- time.Sleep(wait)
- }
-}
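
RetryNotify is not exercised by the test that follows, so here is a minimal sketch of it, reusing NewExponentialBackoff as that test does; the failing operation and the log output are made up:

package main

import (
	"errors"
	"log"
	"time"

	"github.com/olivere/elastic"
)

func main() {
	attempts := 0
	// An operation that fails twice and then succeeds (illustrative only).
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}
	// notify is called with the error after each failed attempt, before sleeping.
	notify := func(err error) { log.Printf("retrying after error: %v", err) }

	b := elastic.NewExponentialBackoff(8*time.Millisecond, 256*time.Millisecond)
	if err := elastic.RetryNotify(op, b, notify); err != nil {
		log.Fatalf("gave up: %v", err)
	}
	log.Printf("succeeded after %d attempts", attempts)
}
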
diff --git a/vendor/github.com/olivere/elastic/retry_test.go b/vendor/github.com/olivere/elastic/retry_test.go
deleted file mode 100644
index 804313095..000000000
--- a/vendor/github.com/olivere/elastic/retry_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-// This file is based on code that is (c) 2014 Cenk Altı and governed
-// by the MIT license.
-// See https://github.com/cenkalti/backoff for original source.
-
-package elastic
-
-import (
- "errors"
- "testing"
- "time"
-)
-
-func TestRetry(t *testing.T) {
- const successOn = 3
- var i = 0
-
-	// This function succeeds on the "successOn"-th call.
- f := func() error {
- i++
- // t.Logf("function is called %d. time\n", i)
-
- if i == successOn {
- // t.Log("OK")
- return nil
- }
-
- // t.Log("error")
- return errors.New("error")
- }
-
- min := time.Duration(8) * time.Millisecond
- max := time.Duration(256) * time.Millisecond
- err := Retry(f, NewExponentialBackoff(min, max))
- if err != nil {
- t.Errorf("unexpected error: %s", err.Error())
- }
- if i != successOn {
- t.Errorf("invalid number of retries: %d", i)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/run-es.sh b/vendor/github.com/olivere/elastic/run-es.sh
deleted file mode 100755
index 624a864ed..000000000
--- a/vendor/github.com/olivere/elastic/run-es.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-VERSION=${VERSION:=6.2.1}
-docker run --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch-oss:$VERSION elasticsearch -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_
diff --git a/vendor/github.com/olivere/elastic/script.go b/vendor/github.com/olivere/elastic/script.go
deleted file mode 100644
index 273473950..000000000
--- a/vendor/github.com/olivere/elastic/script.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "errors"
-
-// Script holds all the parameters necessary to compile or find in the cache
-// and then execute a script.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-scripting.html
-// for details of scripting.
-type Script struct {
- script string
- typ string
- lang string
- params map[string]interface{}
-}
-
-// NewScript creates and initializes a new Script.
-func NewScript(script string) *Script {
- return &Script{
- script: script,
- typ: "inline",
- params: make(map[string]interface{}),
- }
-}
-
-// NewScriptInline creates and initializes a new inline script, i.e. code.
-func NewScriptInline(script string) *Script {
- return NewScript(script).Type("inline")
-}
-
-// NewScriptStored creates and initializes a new stored script.
-func NewScriptStored(script string) *Script {
- return NewScript(script).Type("id")
-}
-
-// Script is either the cache key of the script to be compiled/executed
-// or the actual script source code for inline scripts. For indexed
-// scripts this is the id used in the request. For file scripts this is
-// the file name.
-func (s *Script) Script(script string) *Script {
- s.script = script
- return s
-}
-
-// Type sets the type of script: "inline" or "id".
-func (s *Script) Type(typ string) *Script {
- s.typ = typ
- return s
-}
-
-// Lang sets the language of the script. As of Elasticsearch 6.0 the default
-// scripting language is "painless"; others such as "expression" or "mustache"
-// are also available. To use certain languages, you need to configure your server and/or
-// add plugins. See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-scripting.html
-// for details.
-func (s *Script) Lang(lang string) *Script {
- s.lang = lang
- return s
-}
-
-// Param adds a key/value pair to the parameters that this script will be executed with.
-func (s *Script) Param(name string, value interface{}) *Script {
- if s.params == nil {
- s.params = make(map[string]interface{})
- }
- s.params[name] = value
- return s
-}
-
-// Params sets the map of parameters this script will be executed with.
-func (s *Script) Params(params map[string]interface{}) *Script {
- s.params = params
- return s
-}
-
-// Source returns the JSON serializable data for this Script.
-func (s *Script) Source() (interface{}, error) {
- if s.typ == "" && s.lang == "" && len(s.params) == 0 {
- return s.script, nil
- }
- source := make(map[string]interface{})
- // Beginning with 6.0, the type can only be "source" or "id"
- if s.typ == "" || s.typ == "inline" {
- source["source"] = s.script
- } else {
- source["id"] = s.script
- }
- if s.lang != "" {
- source["lang"] = s.lang
- }
- if len(s.params) > 0 {
- source["params"] = s.params
- }
- return source, nil
-}
-
-// -- Script Field --
-
-// ScriptField is a single script field.
-type ScriptField struct {
- FieldName string // name of the field
-
- script *Script
-}
-
-// NewScriptField creates and initializes a new ScriptField.
-func NewScriptField(fieldName string, script *Script) *ScriptField {
- return &ScriptField{FieldName: fieldName, script: script}
-}
-
-// Source returns the serializable JSON for the ScriptField.
-func (f *ScriptField) Source() (interface{}, error) {
- if f.script == nil {
- return nil, errors.New("ScriptField expects script")
- }
- source := make(map[string]interface{})
- src, err := f.script.Source()
- if err != nil {
- return nil, err
- }
- source["script"] = src
- return source, nil
-}
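
NewScriptField above is not covered by the test file that follows; a minimal sketch of how a parameterized script field serializes, where the "painless" language, the doc field and the parameter are illustrative assumptions:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// A parameterized inline script; values are passed via Params rather
	// than string interpolation.
	script := elastic.NewScriptInline("doc['retweets'].value * params.factor").
		Lang("painless").
		Param("factor", 2)

	field := elastic.NewScriptField("double_retweets", script)
	src, err := field.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// Roughly: {"script":{"lang":"painless","params":{"factor":2},"source":"doc['retweets'].value * params.factor"}}
}
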
diff --git a/vendor/github.com/olivere/elastic/script_test.go b/vendor/github.com/olivere/elastic/script_test.go
deleted file mode 100644
index aa475d7eb..000000000
--- a/vendor/github.com/olivere/elastic/script_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestScriptingDefault(t *testing.T) {
- builder := NewScript("doc['field'].value * 2")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"source":"doc['field'].value * 2"}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestScriptingInline(t *testing.T) {
- builder := NewScriptInline("doc['field'].value * factor").Param("factor", 2.0)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"params":{"factor":2},"source":"doc['field'].value * factor"}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestScriptingStored(t *testing.T) {
- builder := NewScriptStored("script-with-id").Param("factor", 2.0)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"id":"script-with-id","params":{"factor":2}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/scroll.go b/vendor/github.com/olivere/elastic/scroll.go
deleted file mode 100644
index ac51a8c00..000000000
--- a/vendor/github.com/olivere/elastic/scroll.go
+++ /dev/null
@@ -1,470 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "io"
- "net/url"
- "strings"
- "sync"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-const (
- // DefaultScrollKeepAlive is the default time a scroll cursor will be kept alive.
- DefaultScrollKeepAlive = "5m"
-)
-
-// ScrollService iterates over pages of search results from Elasticsearch.
-type ScrollService struct {
- client *Client
- retrier Retrier
- indices []string
- types []string
- keepAlive string
- body interface{}
- ss *SearchSource
- size *int
- pretty bool
- routing string
- preference string
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
-
- mu sync.RWMutex
- scrollId string
-}
-
-// NewScrollService initializes and returns a new ScrollService.
-func NewScrollService(client *Client) *ScrollService {
- builder := &ScrollService{
- client: client,
- ss: NewSearchSource(),
- keepAlive: DefaultScrollKeepAlive,
- }
- return builder
-}
-
-// Retrier allows to set specific retry logic for this ScrollService.
-// If not specified, it will use the client's default retrier.
-func (s *ScrollService) Retrier(retrier Retrier) *ScrollService {
- s.retrier = retrier
- return s
-}
-
-// Index sets the name of one or more indices to iterate over.
-func (s *ScrollService) Index(indices ...string) *ScrollService {
- if s.indices == nil {
- s.indices = make([]string, 0)
- }
- s.indices = append(s.indices, indices...)
- return s
-}
-
-// Type sets the name of one or more types to iterate over.
-func (s *ScrollService) Type(types ...string) *ScrollService {
- if s.types == nil {
- s.types = make([]string, 0)
- }
- s.types = append(s.types, types...)
- return s
-}
-
-// Scroll is an alias for KeepAlive, the time to keep
-// the cursor alive (e.g. "5m" for 5 minutes).
-func (s *ScrollService) Scroll(keepAlive string) *ScrollService {
- s.keepAlive = keepAlive
- return s
-}
-
-// KeepAlive sets the maximum time after which the cursor will expire.
-// It is "5m" by default (DefaultScrollKeepAlive).
-func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService {
- s.keepAlive = keepAlive
- return s
-}
-
-// Size specifies the number of documents Elasticsearch should return
-// from each shard, per page.
-func (s *ScrollService) Size(size int) *ScrollService {
- s.size = &size
- return s
-}
-
-// Body sets the raw body to send to Elasticsearch. This can be e.g. a string,
-// a map[string]interface{} or anything that can be serialized into JSON.
-// Notice that setting the body disables the use of SearchSource and many
-// other properties of the ScrollService.
-func (s *ScrollService) Body(body interface{}) *ScrollService {
- s.body = body
- return s
-}
-
-// SearchSource sets the search source builder to use with this iterator.
-// Notice that only a certain number of properties can be used when scrolling,
-// e.g. query and sorting.
-func (s *ScrollService) SearchSource(searchSource *SearchSource) *ScrollService {
- s.ss = searchSource
- if s.ss == nil {
- s.ss = NewSearchSource()
- }
- return s
-}
-
-// Query sets the query to perform, e.g. a MatchAllQuery.
-func (s *ScrollService) Query(query Query) *ScrollService {
- s.ss = s.ss.Query(query)
- return s
-}
-
-// PostFilter is executed as the last filter. It only affects the
-// search hits but not facets. See
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-post-filter.html
-// for details.
-func (s *ScrollService) PostFilter(postFilter Query) *ScrollService {
- s.ss = s.ss.PostFilter(postFilter)
- return s
-}
-
-// Slice allows slicing the scroll request into several batches.
-// This is supported in Elasticsearch 5.0 or later.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#sliced-scroll
-// for details.
-func (s *ScrollService) Slice(sliceQuery Query) *ScrollService {
- s.ss = s.ss.Slice(sliceQuery)
- return s
-}
-
-// FetchSource indicates whether the response should contain the stored
-// _source for every hit.
-func (s *ScrollService) FetchSource(fetchSource bool) *ScrollService {
- s.ss = s.ss.FetchSource(fetchSource)
- return s
-}
-
-// FetchSourceContext indicates how the _source should be fetched.
-func (s *ScrollService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScrollService {
- s.ss = s.ss.FetchSourceContext(fetchSourceContext)
- return s
-}
-
-// Version can be set to true to return a version for each search hit.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-version.html.
-func (s *ScrollService) Version(version bool) *ScrollService {
- s.ss = s.ss.Version(version)
- return s
-}
-
-// Sort adds a sort order. This can have negative effects on the performance
-// of the scroll operation as Elasticsearch needs to sort first.
-func (s *ScrollService) Sort(field string, ascending bool) *ScrollService {
- s.ss = s.ss.Sort(field, ascending)
- return s
-}
-
-// SortWithInfo specifies a sort order. Notice that sorting can have a
-// negative impact on scroll performance.
-func (s *ScrollService) SortWithInfo(info SortInfo) *ScrollService {
- s.ss = s.ss.SortWithInfo(info)
- return s
-}
-
-// SortBy specifies a sort order. Notice that sorting can have a
-// negative impact on scroll performance.
-func (s *ScrollService) SortBy(sorter ...Sorter) *ScrollService {
- s.ss = s.ss.SortBy(sorter...)
- return s
-}
-
-// Pretty asks Elasticsearch to pretty-print the returned JSON.
-func (s *ScrollService) Pretty(pretty bool) *ScrollService {
- s.pretty = pretty
- return s
-}
-
-// Routing is a list of specific routing values to control the shards
-// the search will be executed on.
-func (s *ScrollService) Routing(routings ...string) *ScrollService {
- s.routing = strings.Join(routings, ",")
- return s
-}
-
-// Preference sets the preference to execute the search. Defaults to
-// randomize across shards ("random"). Can be set to "_local" to prefer
-// local shards, "_primary" to execute on primary shards only,
-// or a custom value which guarantees that the same order will be used
-// across different requests.
-func (s *ScrollService) Preference(preference string) *ScrollService {
- s.preference = preference
- return s
-}
-
-// IgnoreUnavailable indicates whether the specified concrete indices
-// should be ignored when unavailable (missing or closed).
-func (s *ScrollService) IgnoreUnavailable(ignoreUnavailable bool) *ScrollService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices. (This includes `_all` string
-// or when no indices have been specified).
-func (s *ScrollService) AllowNoIndices(allowNoIndices bool) *ScrollService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *ScrollService) ExpandWildcards(expandWildcards string) *ScrollService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// ScrollId specifies the identifier of a scroll in action.
-func (s *ScrollService) ScrollId(scrollId string) *ScrollService {
- s.mu.Lock()
- s.scrollId = scrollId
- s.mu.Unlock()
- return s
-}
-
-// Do returns the next search result. It will return io.EOF as error if there
-// are no more search results.
-func (s *ScrollService) Do(ctx context.Context) (*SearchResult, error) {
- s.mu.RLock()
- nextScrollId := s.scrollId
- s.mu.RUnlock()
- if len(nextScrollId) == 0 {
- return s.first(ctx)
- }
- return s.next(ctx)
-}
-
-// Clear cancels the current scroll operation. If you don't do this manually,
-// the scroll will be expired automatically by Elasticsearch. You can control
-// how long a scroll cursor is kept alive with the KeepAlive func.
-func (s *ScrollService) Clear(ctx context.Context) error {
- s.mu.RLock()
- scrollId := s.scrollId
- s.mu.RUnlock()
- if len(scrollId) == 0 {
- return nil
- }
-
- path := "/_search/scroll"
- params := url.Values{}
- body := struct {
- ScrollId []string `json:"scroll_id,omitempty"`
- }{
- ScrollId: []string{scrollId},
- }
-
- _, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "DELETE",
- Path: path,
- Params: params,
- Body: body,
- Retrier: s.retrier,
- })
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// -- First --
-
-// first takes the first page of search results.
-func (s *ScrollService) first(ctx context.Context) (*SearchResult, error) {
- // Get URL and parameters for request
- path, params, err := s.buildFirstURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP request body
- body, err := s.bodyFirst()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- Retrier: s.retrier,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(SearchResult)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- s.mu.Lock()
- s.scrollId = ret.ScrollId
- s.mu.Unlock()
- if ret.Hits == nil || len(ret.Hits.Hits) == 0 {
- return nil, io.EOF
- }
- return ret, nil
-}
-
-// buildFirstURL builds the URL for retrieving the first page.
-func (s *ScrollService) buildFirstURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
- if len(s.indices) == 0 && len(s.types) == 0 {
- path = "/_search"
- } else if len(s.indices) > 0 && len(s.types) == 0 {
- path, err = uritemplates.Expand("/{index}/_search", map[string]string{
- "index": strings.Join(s.indices, ","),
- })
- } else if len(s.indices) == 0 && len(s.types) > 0 {
- path, err = uritemplates.Expand("/_all/{typ}/_search", map[string]string{
- "typ": strings.Join(s.types, ","),
- })
- } else {
- path, err = uritemplates.Expand("/{index}/{typ}/_search", map[string]string{
- "index": strings.Join(s.indices, ","),
- "typ": strings.Join(s.types, ","),
- })
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.size != nil && *s.size > 0 {
- params.Set("size", fmt.Sprintf("%d", *s.size))
- }
- if len(s.keepAlive) > 0 {
- params.Set("scroll", s.keepAlive)
- }
- if len(s.routing) > 0 {
- params.Set("routing", s.routing)
- }
- if len(s.preference) > 0 {
- params.Set("preference", s.preference)
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if len(s.expandWildcards) > 0 {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
-
- return path, params, nil
-}
-
-// bodyFirst returns the request to fetch the first batch of results.
-func (s *ScrollService) bodyFirst() (interface{}, error) {
- var err error
- var body interface{}
-
- if s.body != nil {
- body = s.body
- } else {
- // Use _doc sort by default if none is specified
- if !s.ss.hasSort() {
- // Use efficient sorting when no user-defined query/body is specified
- s.ss = s.ss.SortBy(SortByDoc{})
- }
-
- // Body from search source
- body, err = s.ss.Source()
- if err != nil {
- return nil, err
- }
- }
-
- return body, nil
-}
-
-// -- Next --
-
-func (s *ScrollService) next(ctx context.Context) (*SearchResult, error) {
- // Get URL for request
- path, params, err := s.buildNextURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- body, err := s.bodyNext()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- Retrier: s.retrier,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(SearchResult)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- s.mu.Lock()
- s.scrollId = ret.ScrollId
- s.mu.Unlock()
- if ret.Hits == nil || len(ret.Hits.Hits) == 0 {
- return nil, io.EOF
- }
- return ret, nil
-}
-
-// buildNextURL builds the URL for the operation.
-func (s *ScrollService) buildNextURL() (string, url.Values, error) {
- path := "/_search/scroll"
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
-
- return path, params, nil
-}
-
-// bodyNext returns the request body used to fetch the next batch of results.
-func (s *ScrollService) bodyNext() (interface{}, error) {
- s.mu.RLock()
- body := struct {
- Scroll string `json:"scroll"`
- ScrollId string `json:"scroll_id,omitempty"`
- }{
- Scroll: s.keepAlive,
- ScrollId: s.scrollId,
- }
- s.mu.RUnlock()
- return body, nil
-}
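
A compact sketch of the usual scroll loop built from the methods above (call Do until io.EOF, then Clear); the "twitter" index, the document shape and a reachable cluster at the default URL are assumptions:

package main

import (
	"context"
	"encoding/json"
	"io"
	"log"

	"github.com/olivere/elastic"
)

// tweet mirrors the illustrative document shape used in the tests below.
type tweet struct {
	User    string `json:"user"`
	Message string `json:"message"`
}

func main() {
	client, err := elastic.NewClient() // assumes a reachable cluster at the default URL
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	svc := client.Scroll("twitter").KeepAlive("1m").Size(100)
	defer svc.Clear(ctx) // release the server-side scroll context when done

	for {
		res, err := svc.Do(ctx)
		if err == io.EOF {
			break // no more pages
		}
		if err != nil {
			log.Fatal(err)
		}
		for _, hit := range res.Hits.Hits {
			var t tweet
			if err := json.Unmarshal(*hit.Source, &t); err != nil {
				continue // skip documents that do not match the expected shape
			}
			log.Printf("%s: %s", t.User, t.Message)
		}
	}
}
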
diff --git a/vendor/github.com/olivere/elastic/scroll_test.go b/vendor/github.com/olivere/elastic/scroll_test.go
deleted file mode 100644
index c94e5f92f..000000000
--- a/vendor/github.com/olivere/elastic/scroll_test.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "io"
- _ "net/http"
- "testing"
-)
-
-func TestScroll(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Should return all documents. Just don't call Do yet!
- svc := client.Scroll(testIndexName).Size(1)
-
- pages := 0
- docs := 0
-
- for {
- res, err := svc.Do(context.TODO())
- if err == io.EOF {
- break
- }
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected results != nil; got nil")
- }
- if res.Hits == nil {
- t.Fatal("expected results.Hits != nil; got nil")
- }
- if want, have := int64(3), res.Hits.TotalHits; want != have {
- t.Fatalf("expected results.Hits.TotalHits = %d; got %d", want, have)
- }
- if want, have := 1, len(res.Hits.Hits); want != have {
- t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", want, have)
- }
-
- pages++
-
- for _, hit := range res.Hits.Hits {
- if hit.Index != testIndexName {
- t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- docs++
- }
-
- if len(res.ScrollId) == 0 {
- t.Fatalf("expected scrollId in results; got %q", res.ScrollId)
- }
- }
-
- if want, have := 3, pages; want != have {
- t.Fatalf("expected to retrieve %d pages; got %d", want, have)
- }
- if want, have := 3, docs; want != have {
- t.Fatalf("expected to retrieve %d hits; got %d", want, have)
- }
-
- err = svc.Clear(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = svc.Do(context.TODO())
- if err == nil {
- t.Fatal("expected to fail")
- }
-}
-
-func TestScrollWithQueryAndSort(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
- // client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Create a scroll service that returns tweets from user olivere
- // and returns them sorted by "message", in reverse order.
- //
- // Just don't call Do yet!
- svc := client.Scroll(testIndexName).
- Query(NewTermQuery("user", "olivere")).
- Sort("message", false).
- Size(1)
-
- docs := 0
- pages := 0
- for {
- res, err := svc.Do(context.TODO())
- if err == io.EOF {
- break
- }
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected results != nil; got nil")
- }
- if res.Hits == nil {
- t.Fatal("expected results.Hits != nil; got nil")
- }
- if want, have := int64(2), res.Hits.TotalHits; want != have {
- t.Fatalf("expected results.Hits.TotalHits = %d; got %d", want, have)
- }
- if want, have := 1, len(res.Hits.Hits); want != have {
- t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", want, have)
- }
-
- pages++
-
- for _, hit := range res.Hits.Hits {
- if hit.Index != testIndexName {
- t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- docs++
- }
- }
-
- if want, have := 2, pages; want != have {
- t.Fatalf("expected to retrieve %d pages; got %d", want, have)
- }
- if want, have := 2, docs; want != have {
- t.Fatalf("expected to retrieve %d hits; got %d", want, have)
- }
-}
-
-func TestScrollWithBody(t *testing.T) {
- // client := setupTestClientAndCreateIndexAndLog(t)
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Test with simple strings and a map
- var tests = []struct {
- Body interface{}
- ExpectedTotalHits int64
- ExpectedDocs int
- ExpectedPages int
- }{
- {
- Body: `{"query":{"match_all":{}}}`,
- ExpectedTotalHits: 3,
- ExpectedDocs: 3,
- ExpectedPages: 3,
- },
- {
- Body: `{"query":{"term":{"user":"olivere"}},"sort":["_doc"]}`,
- ExpectedTotalHits: 2,
- ExpectedDocs: 2,
- ExpectedPages: 2,
- },
- {
- Body: `{"query":{"term":{"user":"olivere"}},"sort":[{"retweets":"desc"}]}`,
- ExpectedTotalHits: 2,
- ExpectedDocs: 2,
- ExpectedPages: 2,
- },
- {
- Body: map[string]interface{}{
- "query": map[string]interface{}{
- "term": map[string]interface{}{
- "user": "olivere",
- },
- },
- "sort": []interface{}{"_doc"},
- },
- ExpectedTotalHits: 2,
- ExpectedDocs: 2,
- ExpectedPages: 2,
- },
- }
-
- for i, tt := range tests {
- // Should return all documents. Just don't call Do yet!
- svc := client.Scroll(testIndexName).Size(1).Body(tt.Body)
-
- pages := 0
- docs := 0
-
- for {
- res, err := svc.Do(context.TODO())
- if err == io.EOF {
- break
- }
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatalf("#%d: expected results != nil; got nil", i)
- }
- if res.Hits == nil {
- t.Fatalf("#%d: expected results.Hits != nil; got nil", i)
- }
- if want, have := tt.ExpectedTotalHits, res.Hits.TotalHits; want != have {
- t.Fatalf("#%d: expected results.Hits.TotalHits = %d; got %d", i, want, have)
- }
- if want, have := 1, len(res.Hits.Hits); want != have {
- t.Fatalf("#%d: expected len(results.Hits.Hits) = %d; got %d", i, want, have)
- }
-
- pages++
-
- for _, hit := range res.Hits.Hits {
- if hit.Index != testIndexName {
- t.Fatalf("#%d: expected SearchResult.Hits.Hit.Index = %q; got %q", i, testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatalf("#%d: %v", i, err)
- }
- docs++
- }
-
- if len(res.ScrollId) == 0 {
- t.Fatalf("#%d: expected scrollId in results; got %q", i, res.ScrollId)
- }
- }
-
- if want, have := tt.ExpectedPages, pages; want != have {
- t.Fatalf("#%d: expected to retrieve %d pages; got %d", i, want, have)
- }
- if want, have := tt.ExpectedDocs, docs; want != have {
- t.Fatalf("#%d: expected to retrieve %d hits; got %d", i, want, have)
- }
-
- err = svc.Clear(context.TODO())
- if err != nil {
- t.Fatalf("#%d: failed to clear scroll context: %v", i, err)
- }
-
- _, err = svc.Do(context.TODO())
- if err == nil {
- t.Fatalf("#%d: expected to fail", i)
- }
- }
-}
-
-func TestScrollWithSlice(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
-
- // Should return all documents. Just don't call Do yet!
- sliceQuery := NewSliceQuery().Id(0).Max(2)
- svc := client.Scroll(testIndexName).Slice(sliceQuery).Size(1)
-
- pages := 0
- docs := 0
-
- for {
- res, err := svc.Do(context.TODO())
- if err == io.EOF {
- break
- }
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected results != nil; got nil")
- }
- if res.Hits == nil {
- t.Fatal("expected results.Hits != nil; got nil")
- }
-
- pages++
-
- for _, hit := range res.Hits.Hits {
- if hit.Index != testIndexName {
- t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- docs++
- }
-
- if len(res.ScrollId) == 0 {
- t.Fatalf("expected scrollId in results; got %q", res.ScrollId)
- }
- }
-
- if pages == 0 {
- t.Fatal("expected to retrieve some pages")
- }
- if docs == 0 {
- t.Fatal("expected to retrieve some hits")
- }
-
- if err := svc.Clear(context.TODO()); err != nil {
- t.Fatal(err)
- }
-
- if _, err := svc.Do(context.TODO()); err == nil {
- t.Fatal("expected to fail")
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search.go b/vendor/github.com/olivere/elastic/search.go
deleted file mode 100644
index 034b12096..000000000
--- a/vendor/github.com/olivere/elastic/search.go
+++ /dev/null
@@ -1,580 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
- "reflect"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// SearchService searches for documents in Elasticsearch.
-type SearchService struct {
- client *Client
- searchSource *SearchSource
- source interface{}
- pretty bool
- filterPath []string
- searchType string
- index []string
- typ []string
- routing string
- preference string
- requestCache *bool
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
-}
-
-// NewSearchService creates a new service for searching in Elasticsearch.
-func NewSearchService(client *Client) *SearchService {
- builder := &SearchService{
- client: client,
- searchSource: NewSearchSource(),
- }
- return builder
-}
-
-// SearchSource sets the search source builder to use with this service.
-func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService {
- s.searchSource = searchSource
- if s.searchSource == nil {
- s.searchSource = NewSearchSource()
- }
- return s
-}
-
-// Source allows the user to set the request body manually without using
-// any of the structs and interfaces in Elastic.
-func (s *SearchService) Source(source interface{}) *SearchService {
- s.source = source
- return s
-}
-
-// FilterPath allows reducing the response, a mechanism known as
-// response filtering and described here:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/common-options.html#common-options-response-filtering.
-func (s *SearchService) FilterPath(filterPath ...string) *SearchService {
- s.filterPath = append(s.filterPath, filterPath...)
- return s
-}
-
-// Index sets the names of the indices to use for search.
-func (s *SearchService) Index(index ...string) *SearchService {
- s.index = append(s.index, index...)
- return s
-}
-
-// Type adds search restrictions for a list of types.
-func (s *SearchService) Type(typ ...string) *SearchService {
- s.typ = append(s.typ, typ...)
- return s
-}
-
-// Pretty enables the caller to indent the JSON output.
-func (s *SearchService) Pretty(pretty bool) *SearchService {
- s.pretty = pretty
- return s
-}
-
-// Timeout sets the timeout to use, e.g. "1s" or "1000ms".
-func (s *SearchService) Timeout(timeout string) *SearchService {
- s.searchSource = s.searchSource.Timeout(timeout)
- return s
-}
-
-// Profile sets the Profile API flag on the search source.
-// When enabled, a search executed by this service will return query
-// profiling data.
-func (s *SearchService) Profile(profile bool) *SearchService {
- s.searchSource = s.searchSource.Profile(profile)
- return s
-}
-
-// Collapse adds field collapsing.
-func (s *SearchService) Collapse(collapse *CollapseBuilder) *SearchService {
- s.searchSource = s.searchSource.Collapse(collapse)
- return s
-}
-
-// TimeoutInMillis sets the timeout in milliseconds.
-func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService {
- s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis)
- return s
-}
-
-// SearchType sets the search operation type. Valid values are:
-// "dfs_query_then_fetch" and "query_then_fetch".
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-search-type.html
-// for details.
-func (s *SearchService) SearchType(searchType string) *SearchService {
- s.searchType = searchType
- return s
-}
-
-// Routing is a list of specific routing values to control the shards
-// the search will be executed on.
-func (s *SearchService) Routing(routings ...string) *SearchService {
- s.routing = strings.Join(routings, ",")
- return s
-}
-
-// Preference sets the preference to execute the search. Defaults to
-// randomize across shards ("random"). Can be set to "_local" to prefer
-// local shards, "_primary" to execute on primary shards only,
-// or a custom value which guarantees that the same order will be used
-// across different requests.
-func (s *SearchService) Preference(preference string) *SearchService {
- s.preference = preference
- return s
-}
-
-// RequestCache indicates whether the cache should be used for this
-// request or not, defaults to index level setting.
-func (s *SearchService) RequestCache(requestCache bool) *SearchService {
- s.requestCache = &requestCache
- return s
-}
-
-// Query sets the query to perform, e.g. MatchAllQuery.
-func (s *SearchService) Query(query Query) *SearchService {
- s.searchSource = s.searchSource.Query(query)
- return s
-}
-
-// PostFilter will be executed after the query has been executed and
-// only affects the search hits, not the aggregations.
-// This filter is always executed as the last filtering mechanism.
-func (s *SearchService) PostFilter(postFilter Query) *SearchService {
- s.searchSource = s.searchSource.PostFilter(postFilter)
- return s
-}
-
-// FetchSource indicates whether the response should contain the stored
-// _source for every hit.
-func (s *SearchService) FetchSource(fetchSource bool) *SearchService {
- s.searchSource = s.searchSource.FetchSource(fetchSource)
- return s
-}
-
-// FetchSourceContext indicates how the _source should be fetched.
-func (s *SearchService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchService {
- s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext)
- return s
-}
-
-// Highlight adds highlighting to the search.
-func (s *SearchService) Highlight(highlight *Highlight) *SearchService {
- s.searchSource = s.searchSource.Highlight(highlight)
- return s
-}
-
-// GlobalSuggestText defines the global text to use with all suggesters.
-// This avoids repetition.
-func (s *SearchService) GlobalSuggestText(globalText string) *SearchService {
- s.searchSource = s.searchSource.GlobalSuggestText(globalText)
- return s
-}
-
-// Suggester adds a suggester to the search.
-func (s *SearchService) Suggester(suggester Suggester) *SearchService {
- s.searchSource = s.searchSource.Suggester(suggester)
- return s
-}
-
-// Aggregation adds an aggregation to perform as part of the search.
-func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService {
- s.searchSource = s.searchSource.Aggregation(name, aggregation)
- return s
-}
-
-// MinScore sets the minimum score below which docs will be filtered out.
-func (s *SearchService) MinScore(minScore float64) *SearchService {
- s.searchSource = s.searchSource.MinScore(minScore)
- return s
-}
-
-// From index to start the search from. Defaults to 0.
-func (s *SearchService) From(from int) *SearchService {
- s.searchSource = s.searchSource.From(from)
- return s
-}
-
-// Size is the number of search hits to return. Defaults to 10.
-func (s *SearchService) Size(size int) *SearchService {
- s.searchSource = s.searchSource.Size(size)
- return s
-}
-
-// Explain indicates whether each search hit should be returned with
-// an explanation of the hit (ranking).
-func (s *SearchService) Explain(explain bool) *SearchService {
- s.searchSource = s.searchSource.Explain(explain)
- return s
-}
-
-// Version indicates whether each search hit should be returned with
-// a version associated to it.
-func (s *SearchService) Version(version bool) *SearchService {
- s.searchSource = s.searchSource.Version(version)
- return s
-}
-
-// Sort adds a sort order.
-func (s *SearchService) Sort(field string, ascending bool) *SearchService {
- s.searchSource = s.searchSource.Sort(field, ascending)
- return s
-}
-
-// SortWithInfo adds a sort order.
-func (s *SearchService) SortWithInfo(info SortInfo) *SearchService {
- s.searchSource = s.searchSource.SortWithInfo(info)
- return s
-}
-
-// SortBy adds a sort order.
-func (s *SearchService) SortBy(sorter ...Sorter) *SearchService {
- s.searchSource = s.searchSource.SortBy(sorter...)
- return s
-}
-
-// NoStoredFields indicates that no stored fields should be loaded, resulting in only
-// id and type to be returned per hit.
-func (s *SearchService) NoStoredFields() *SearchService {
- s.searchSource = s.searchSource.NoStoredFields()
- return s
-}
-
-// StoredField adds a single field to load and return (note, must be stored) as
-// part of the search request. If none are specified, the source of the
-// document will be returned.
-func (s *SearchService) StoredField(fieldName string) *SearchService {
- s.searchSource = s.searchSource.StoredField(fieldName)
- return s
-}
-
-// StoredFields sets the fields to load and return as part of the search request.
-// If none are specified, the source of the document will be returned.
-func (s *SearchService) StoredFields(fields ...string) *SearchService {
- s.searchSource = s.searchSource.StoredFields(fields...)
- return s
-}
-
-// TrackScores is applied when sorting and controls if scores will be
-// tracked as well. Defaults to false.
-func (s *SearchService) TrackScores(trackScores bool) *SearchService {
- s.searchSource = s.searchSource.TrackScores(trackScores)
- return s
-}
-
-// SearchAfter allows a different form of pagination by using a live cursor,
-// using the results of the previous page to help retrieve the next one.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-search-after.html
-func (s *SearchService) SearchAfter(sortValues ...interface{}) *SearchService {
- s.searchSource = s.searchSource.SearchAfter(sortValues...)
- return s
-}
-
-// IgnoreUnavailable indicates whether the specified concrete indices
-// should be ignored when unavailable (missing or closed).
-func (s *SearchService) IgnoreUnavailable(ignoreUnavailable bool) *SearchService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices. (This includes `_all` string
-// or when no indices have been specified).
-func (s *SearchService) AllowNoIndices(allowNoIndices bool) *SearchService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *SearchService) ExpandWildcards(expandWildcards string) *SearchService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *SearchService) buildURL() (string, url.Values, error) {
- var err error
- var path string
-
- if len(s.index) > 0 && len(s.typ) > 0 {
- path, err = uritemplates.Expand("/{index}/{type}/_search", map[string]string{
- "index": strings.Join(s.index, ","),
- "type": strings.Join(s.typ, ","),
- })
- } else if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_search", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- } else if len(s.typ) > 0 {
- path, err = uritemplates.Expand("/_all/{type}/_search", map[string]string{
- "type": strings.Join(s.typ, ","),
- })
- } else {
- path = "/_search"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", fmt.Sprintf("%v", s.pretty))
- }
- if s.searchType != "" {
- params.Set("search_type", s.searchType)
- }
- if s.routing != "" {
- params.Set("routing", s.routing)
- }
- if s.preference != "" {
- params.Set("preference", s.preference)
- }
- if s.requestCache != nil {
- params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache))
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if len(s.filterPath) > 0 {
- params.Set("filter_path", strings.Join(s.filterPath, ","))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *SearchService) Validate() error {
- return nil
-}
-
-// Do executes the search and returns a SearchResult.
-func (s *SearchService) Do(ctx context.Context) (*SearchResult, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Perform request
- var body interface{}
- if s.source != nil {
- body = s.source
- } else {
- src, err := s.searchSource.Source()
- if err != nil {
- return nil, err
- }
- body = src
- }
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return search results
- ret := new(SearchResult)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// SearchResult is the result of a search in Elasticsearch.
-type SearchResult struct {
- TookInMillis int64 `json:"took"` // search time in milliseconds
- ScrollId string `json:"_scroll_id"` // only used with Scroll and Scan operations
- Hits *SearchHits `json:"hits"` // the actual search hits
- Suggest SearchSuggest `json:"suggest"` // results from suggesters
- Aggregations Aggregations `json:"aggregations"` // results from aggregations
- TimedOut bool `json:"timed_out"` // true if the search timed out
- Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet
- Profile *SearchProfile `json:"profile,omitempty"` // profiling results, if optional Profile API was active for this search
- Shards *shardsInfo `json:"_shards,omitempty"` // shard information
-}
-
-// TotalHits is a convenience function to return the number of hits for
-// a search result.
-func (r *SearchResult) TotalHits() int64 {
- if r.Hits != nil {
- return r.Hits.TotalHits
- }
- return 0
-}
-
-// Each is a utility function to iterate over all hits. It saves you from
-// checking for nil values. Notice that Each ignores errors when
-// deserializing JSON, and hits with an empty or nil _source yield the
-// zero value of the given type.
-func (r *SearchResult) Each(typ reflect.Type) []interface{} {
- if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 {
- return nil
- }
- var slice []interface{}
- for _, hit := range r.Hits.Hits {
- v := reflect.New(typ).Elem()
- if hit.Source == nil {
- slice = append(slice, v.Interface())
- continue
- }
- if err := json.Unmarshal(*hit.Source, v.Addr().Interface()); err == nil {
- slice = append(slice, v.Interface())
- }
- }
- return slice
-}
-
-// SearchHits specifies the list of search hits.
-type SearchHits struct {
- TotalHits int64 `json:"total"` // total number of hits found
- MaxScore *float64 `json:"max_score"` // maximum score of all hits
- Hits []*SearchHit `json:"hits"` // the actual hits returned
-}
-
-// SearchHit is a single hit.
-type SearchHit struct {
- Score *float64 `json:"_score"` // computed score
- Index string `json:"_index"` // index name
- Type string `json:"_type"` // type meta field
- Id string `json:"_id"` // external or internal
- Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields)
- Routing string `json:"_routing"` // routing meta field
- Parent string `json:"_parent"` // parent meta field
- Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService
- Sort []interface{} `json:"sort"` // sort information
- Highlight SearchHitHighlight `json:"highlight"` // highlighter information
- Source *json.RawMessage `json:"_source"` // stored document source
- Fields map[string]interface{} `json:"fields"` // returned (stored) fields
- Explanation *SearchExplanation `json:"_explanation"` // explains how the score was computed
- MatchedQueries []string `json:"matched_queries"` // matched queries
- InnerHits map[string]*SearchHitInnerHits `json:"inner_hits"` // inner hits with ES >= 1.5.0
-
- // Shard
- // HighlightFields
- // SortValues
- // MatchedFilters
-}
-
-type SearchHitInnerHits struct {
- Hits *SearchHits `json:"hits"`
-}
-
-// SearchExplanation explains how the score for a hit was computed.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-explain.html.
-type SearchExplanation struct {
- Value float64 `json:"value"` // e.g. 1.0
- Description string `json:"description"` // e.g. "boost" or "ConstantScore(*:*), product of:"
- Details []SearchExplanation `json:"details,omitempty"` // recursive details
-}
-
-// Suggest
-
-// SearchSuggest is a map of suggestions.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters.html.
-type SearchSuggest map[string][]SearchSuggestion
-
-// SearchSuggestion is a single search suggestion.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters.html.
-type SearchSuggestion struct {
- Text string `json:"text"`
- Offset int `json:"offset"`
- Length int `json:"length"`
- Options []SearchSuggestionOption `json:"options"`
-}
-
-// SearchSuggestionOption is an option of a SearchSuggestion.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters.html.
-type SearchSuggestionOption struct {
- Text string `json:"text"`
- Index string `json:"_index"`
- Type string `json:"_type"`
- Id string `json:"_id"`
- Score float64 `json:"score"`
- Highlighted string `json:"highlighted"`
- CollateMatch bool `json:"collate_match"`
- Freq int `json:"freq"` // from TermSuggestion.Option in Java API
- Source *json.RawMessage `json:"_source"`
-}
-
-// SearchProfile holds the shard profiling data collected during
-// query execution, returned in the "profile" section of a SearchResult.
-type SearchProfile struct {
- Shards []SearchProfileShardResult `json:"shards"`
-}
-
-// SearchProfileShardResult holds the profiling data for a single shard
-// accessed during the search query or aggregation.
-type SearchProfileShardResult struct {
- ID string `json:"id"`
- Searches []QueryProfileShardResult `json:"searches"`
- Aggregations []ProfileResult `json:"aggregations"`
-}
-
-// QueryProfileShardResult is a container class to hold the profile results
-// for a single shard in the request. It contains a list of query profiles,
-// a collector tree and a total rewrite tree.
-type QueryProfileShardResult struct {
- Query []ProfileResult `json:"query,omitempty"`
- RewriteTime int64 `json:"rewrite_time,omitempty"`
- Collector []interface{} `json:"collector,omitempty"`
-}
-
-// CollectorResult holds the profile timings of the collectors used in the
-// search. Children's CollectorResults may be embedded inside of a parent
-// CollectorResult.
-type CollectorResult struct {
- Name string `json:"name,omitempty"`
- Reason string `json:"reason,omitempty"`
- Time string `json:"time,omitempty"`
- TimeNanos int64 `json:"time_in_nanos,omitempty"`
- Children []CollectorResult `json:"children,omitempty"`
-}
-
-// ProfileResult is the internal representation of a profiled query,
-// corresponding to a single node in the query tree.
-type ProfileResult struct {
- Type string `json:"type"`
- Description string `json:"description,omitempty"`
- NodeTime string `json:"time,omitempty"`
- NodeTimeNanos int64 `json:"time_in_nanos,omitempty"`
- Breakdown map[string]int64 `json:"breakdown,omitempty"`
- Children []ProfileResult `json:"children,omitempty"`
-}
-
-// Aggregations (see search_aggs.go)
-
-// Highlighting
-
-// SearchHitHighlight is the highlight information of a search hit.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-highlighting.html
-// for a general discussion of highlighting.
-type SearchHitHighlight map[string][]string
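As a small usage sketch for SearchHitHighlight: it is a plain map from field name to highlighted fragments, so no extra decoding is needed. The field name below is a placeholder and the hit is assumed to come from a search that requested highlighting.

package example

import elastic "github.com/olivere/elastic"

// highlightFragments returns the highlighter output of one hit for a field.
// Hits without highlighting simply yield a nil slice.
func highlightFragments(hit *elastic.SearchHit, field string) []string {
	if hit == nil || hit.Highlight == nil {
		return nil
	}
	return hit.Highlight[field]
}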
diff --git a/vendor/github.com/olivere/elastic/search_aggs.go b/vendor/github.com/olivere/elastic/search_aggs.go
deleted file mode 100644
index 6359611b1..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs.go
+++ /dev/null
@@ -1,1520 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "bytes"
- "encoding/json"
-)
-
-// Aggregations can be seen as a unit-of-work that builds
-// analytic information over a set of documents. It is
-// (in many senses) the follow-up to facets in Elasticsearch.
-// For more details about aggregations, visit:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations.html
-type Aggregation interface {
- // Source returns a JSON-serializable aggregation that is a fragment
- // of the request sent to Elasticsearch.
- Source() (interface{}, error)
-}
-
-// Aggregations is a list of aggregations that are part of a search result.
-type Aggregations map[string]*json.RawMessage
-
-// Min returns min aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-min-aggregation.html
-func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationValueMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Max returns max aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-max-aggregation.html
-func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationValueMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Sum returns sum aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-sum-aggregation.html
-func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationValueMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Avg returns average aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-avg-aggregation.html
-func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationValueMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// ValueCount returns value-count aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-valuecount-aggregation.html
-func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationValueMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Cardinality returns cardinality aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-cardinality-aggregation.html
-func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationValueMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Stats returns stats aggregation results.
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-stats-aggregation.html
-func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationStatsMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// ExtendedStats returns extended stats aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-extendedstats-aggregation.html
-func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationExtendedStatsMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// MatrixStats returns matrix stats aggregation results.
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-matrix-stats-aggregation.html
-func (a Aggregations) MatrixStats(name string) (*AggregationMatrixStats, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationMatrixStats)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Percentiles returns percentiles results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-percentile-aggregation.html
-func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPercentilesMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// PercentileRanks returns percentile ranks results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-percentile-rank-aggregation.html
-func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPercentilesMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// TopHits returns top-hits aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-top-hits-aggregation.html
-func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationTopHitsMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Global returns global results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-global-aggregation.html
-func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationSingleBucket)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Filter returns filter results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-filter-aggregation.html
-func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationSingleBucket)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Filters returns filters results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-filters-aggregation.html
-func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationBucketFilters)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Missing returns missing results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-missing-aggregation.html
-func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationSingleBucket)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Nested returns nested results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-nested-aggregation.html
-func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationSingleBucket)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// ReverseNested returns reverse-nested results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-reverse-nested-aggregation.html
-func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationSingleBucket)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Children returns children results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-children-aggregation.html
-func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationSingleBucket)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Terms returns terms aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-terms-aggregation.html
-func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationBucketKeyItems)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
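To make the lookup pattern above concrete, here is a hedged sketch of reading a terms aggregation out of a SearchResult. The aggregation name "by_user" is an assumption made for the example; the boolean returned by Terms reports whether the aggregation was present and decoded.

package example

import elastic "github.com/olivere/elastic"

// termCounts extracts bucket keys and doc counts from a terms aggregation
// named "by_user". String keys decode into bucket.Key as string values.
func termCounts(res *elastic.SearchResult) map[string]int64 {
	counts := make(map[string]int64)
	agg, found := res.Aggregations.Terms("by_user")
	if !found {
		return counts
	}
	for _, bucket := range agg.Buckets {
		if key, ok := bucket.Key.(string); ok {
			counts[key] = bucket.DocCount
		}
	}
	return counts
}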
-
-// SignificantTerms returns significant terms aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html
-func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationBucketSignificantTerms)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Sampler returns sampler aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-sampler-aggregation.html
-func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationSingleBucket)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Range returns range aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-range-aggregation.html
-func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationBucketRangeItems)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// KeyedRange returns keyed range aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-range-aggregation.html.
-func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationBucketKeyedRangeItems)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// DateRange returns date range aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-daterange-aggregation.html
-func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationBucketRangeItems)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// IPRange returns IP range aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-iprange-aggregation.html
-func (a Aggregations) IPRange(name string) (*AggregationBucketRangeItems, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationBucketRangeItems)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Histogram returns histogram aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-histogram-aggregation.html
-func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationBucketHistogramItems)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// DateHistogram returns date histogram aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-datehistogram-aggregation.html
-func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationBucketHistogramItems)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// GeoBounds returns geo-bounds aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-geobounds-aggregation.html
-func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationGeoBoundsMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// GeoHash returns geo-hash aggregation results.
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-geohashgrid-aggregation.html
-func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationBucketKeyItems)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// GeoDistance returns geo distance aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-geodistance-aggregation.html
-func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationBucketRangeItems)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// AvgBucket returns average bucket pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-avg-bucket-aggregation.html
-func (a Aggregations) AvgBucket(name string) (*AggregationPipelineSimpleValue, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPipelineSimpleValue)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
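Pipeline values such as this one are read the same way as any other aggregation. A minimal sketch, assuming a pipeline aggregation named "avg_monthly_sales" (the name is a placeholder):

package example

import elastic "github.com/olivere/elastic"

// avgMonthlySales reads an avg_bucket pipeline result. Value is a *float64
// because Elasticsearch may return null when there is nothing to average.
func avgMonthlySales(res *elastic.SearchResult) (float64, bool) {
	agg, found := res.Aggregations.AvgBucket("avg_monthly_sales")
	if !found || agg.Value == nil {
		return 0, false
	}
	return *agg.Value, true
}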
-
-// SumBucket returns sum bucket pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-sum-bucket-aggregation.html
-func (a Aggregations) SumBucket(name string) (*AggregationPipelineSimpleValue, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPipelineSimpleValue)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// StatsBucket returns stats bucket pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-stats-bucket-aggregation.html
-func (a Aggregations) StatsBucket(name string) (*AggregationPipelineStatsMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPipelineStatsMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// PercentilesBucket returns percentiles bucket pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-percentiles-bucket-aggregation.html
-func (a Aggregations) PercentilesBucket(name string) (*AggregationPipelinePercentilesMetric, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPipelinePercentilesMetric)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// MaxBucket returns maximum bucket pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-max-bucket-aggregation.html
-func (a Aggregations) MaxBucket(name string) (*AggregationPipelineBucketMetricValue, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPipelineBucketMetricValue)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// MinBucket returns minimum bucket pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-min-bucket-aggregation.html
-func (a Aggregations) MinBucket(name string) (*AggregationPipelineBucketMetricValue, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPipelineBucketMetricValue)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// MovAvg returns moving average pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html
-func (a Aggregations) MovAvg(name string) (*AggregationPipelineSimpleValue, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPipelineSimpleValue)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Derivative returns derivative pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-derivative-aggregation.html
-func (a Aggregations) Derivative(name string) (*AggregationPipelineDerivative, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPipelineDerivative)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// CumulativeSum returns a cumulative sum pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-cumulative-sum-aggregation.html
-func (a Aggregations) CumulativeSum(name string) (*AggregationPipelineSimpleValue, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPipelineSimpleValue)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// BucketScript returns bucket script pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-bucket-script-aggregation.html
-func (a Aggregations) BucketScript(name string) (*AggregationPipelineSimpleValue, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPipelineSimpleValue)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// SerialDiff returns serial differencing pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-serialdiff-aggregation.html
-func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationPipelineSimpleValue)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// Composite returns composite bucket aggregation results.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html
-// for details.
-func (a Aggregations) Composite(name string) (*AggregationBucketCompositeItems, bool) {
- if raw, found := a[name]; found {
- agg := new(AggregationBucketCompositeItems)
- if raw == nil {
- return agg, true
- }
- if err := json.Unmarshal(*raw, agg); err == nil {
- return agg, true
- }
- }
- return nil, false
-}
-
-// -- Single value metric --
-
-// AggregationValueMetric is a single-value metric, returned e.g. by a
-// Min or Max aggregation.
-type AggregationValueMetric struct {
- Aggregations
-
- Value *float64 //`json:"value"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure.
-func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["value"]; ok && v != nil {
- json.Unmarshal(*v, &a.Value)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Stats metric --
-
-// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation.
-type AggregationStatsMetric struct {
- Aggregations
-
- Count int64 // `json:"count"`
- Min *float64 //`json:"min,omitempty"`
- Max *float64 //`json:"max,omitempty"`
- Avg *float64 //`json:"avg,omitempty"`
- Sum *float64 //`json:"sum,omitempty"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure.
-func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["count"]; ok && v != nil {
- json.Unmarshal(*v, &a.Count)
- }
- if v, ok := aggs["min"]; ok && v != nil {
- json.Unmarshal(*v, &a.Min)
- }
- if v, ok := aggs["max"]; ok && v != nil {
- json.Unmarshal(*v, &a.Max)
- }
- if v, ok := aggs["avg"]; ok && v != nil {
- json.Unmarshal(*v, &a.Avg)
- }
- if v, ok := aggs["sum"]; ok && v != nil {
- json.Unmarshal(*v, &a.Sum)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Extended stats metric --
-
-// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation.
-type AggregationExtendedStatsMetric struct {
- Aggregations
-
- Count int64 // `json:"count"`
- Min *float64 //`json:"min,omitempty"`
- Max *float64 //`json:"max,omitempty"`
- Avg *float64 //`json:"avg,omitempty"`
- Sum *float64 //`json:"sum,omitempty"`
- SumOfSquares *float64 //`json:"sum_of_squares,omitempty"`
- Variance *float64 //`json:"variance,omitempty"`
- StdDeviation *float64 //`json:"std_deviation,omitempty"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure.
-func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["count"]; ok && v != nil {
- json.Unmarshal(*v, &a.Count)
- }
- if v, ok := aggs["min"]; ok && v != nil {
- json.Unmarshal(*v, &a.Min)
- }
- if v, ok := aggs["max"]; ok && v != nil {
- json.Unmarshal(*v, &a.Max)
- }
- if v, ok := aggs["avg"]; ok && v != nil {
- json.Unmarshal(*v, &a.Avg)
- }
- if v, ok := aggs["sum"]; ok && v != nil {
- json.Unmarshal(*v, &a.Sum)
- }
- if v, ok := aggs["sum_of_squares"]; ok && v != nil {
- json.Unmarshal(*v, &a.SumOfSquares)
- }
- if v, ok := aggs["variance"]; ok && v != nil {
- json.Unmarshal(*v, &a.Variance)
- }
- if v, ok := aggs["std_deviation"]; ok && v != nil {
- json.Unmarshal(*v, &a.StdDeviation)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Matrix Stats --
-
-// AggregationMatrixStats is returned by a MatrixStats aggregation.
-type AggregationMatrixStats struct {
- Aggregations
-
- Fields []*AggregationMatrixStatsField // `json:"field,omitempty"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// AggregationMatrixStatsField represents running stats of a single field
-// returned from MatrixStats aggregation.
-type AggregationMatrixStatsField struct {
- Name string `json:"name"`
- Count int64 `json:"count"`
- Mean float64 `json:"mean,omitempty"`
- Variance float64 `json:"variance,omitempty"`
- Skewness float64 `json:"skewness,omitempty"`
- Kurtosis float64 `json:"kurtosis,omitempty"`
- Covariance map[string]float64 `json:"covariance,omitempty"`
- Correlation map[string]float64 `json:"correlation,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationMatrixStats structure.
-func (a *AggregationMatrixStats) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["fields"]; ok && v != nil {
- // RunningStats for every field
- json.Unmarshal(*v, &a.Fields)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Percentiles metric --
-
-// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation.
-type AggregationPercentilesMetric struct {
- Aggregations
-
- Values map[string]float64 // `json:"values"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure.
-func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["values"]; ok && v != nil {
- json.Unmarshal(*v, &a.Values)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Top-hits metric --
-
-// AggregationTopHitsMetric is a metric returned by a TopHits aggregation.
-type AggregationTopHitsMetric struct {
- Aggregations
-
- Hits *SearchHits //`json:"hits"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure.
-func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- a.Aggregations = aggs
- a.Hits = new(SearchHits)
- if v, ok := aggs["hits"]; ok && v != nil {
- json.Unmarshal(*v, &a.Hits)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- return nil
-}
-
-// -- Geo-bounds metric --
-
-// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation.
-type AggregationGeoBoundsMetric struct {
- Aggregations
-
- Bounds struct {
- TopLeft struct {
- Latitude float64 `json:"lat"`
- Longitude float64 `json:"lon"`
- } `json:"top_left"`
- BottomRight struct {
- Latitude float64 `json:"lat"`
- Longitude float64 `json:"lon"`
- } `json:"bottom_right"`
- } `json:"bounds"`
-
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure.
-func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["bounds"]; ok && v != nil {
- json.Unmarshal(*v, &a.Bounds)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Single bucket --
-
-// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global.
-type AggregationSingleBucket struct {
- Aggregations
-
- DocCount int64 // `json:"doc_count"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure.
-func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["doc_count"]; ok && v != nil {
- json.Unmarshal(*v, &a.DocCount)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Bucket range items --
-
-// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned
-// with a range aggregation.
-type AggregationBucketRangeItems struct {
- Aggregations
-
- DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"`
- SumOfOtherDocCount int64 //`json:"sum_other_doc_count"`
- Buckets []*AggregationBucketRangeItem //`json:"buckets"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure.
-func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
- json.Unmarshal(*v, &a.DocCountErrorUpperBound)
- }
- if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
- json.Unmarshal(*v, &a.SumOfOtherDocCount)
- }
- if v, ok := aggs["buckets"]; ok && v != nil {
- json.Unmarshal(*v, &a.Buckets)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// AggregationBucketKeyedRangeItems is a bucket aggregation that is e.g. returned
-// with a keyed range aggregation.
-type AggregationBucketKeyedRangeItems struct {
- Aggregations
-
- DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"`
- SumOfOtherDocCount int64 //`json:"sum_other_doc_count"`
- Buckets map[string]*AggregationBucketRangeItem //`json:"buckets"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyedRangeItems structure.
-func (a *AggregationBucketKeyedRangeItems) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
- json.Unmarshal(*v, &a.DocCountErrorUpperBound)
- }
- if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
- json.Unmarshal(*v, &a.SumOfOtherDocCount)
- }
- if v, ok := aggs["buckets"]; ok && v != nil {
- json.Unmarshal(*v, &a.Buckets)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure.
-type AggregationBucketRangeItem struct {
- Aggregations
-
- Key string //`json:"key"`
- DocCount int64 //`json:"doc_count"`
- From *float64 //`json:"from"`
- FromAsString string //`json:"from_as_string"`
- To *float64 //`json:"to"`
- ToAsString string //`json:"to_as_string"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure.
-func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["key"]; ok && v != nil {
- json.Unmarshal(*v, &a.Key)
- }
- if v, ok := aggs["doc_count"]; ok && v != nil {
- json.Unmarshal(*v, &a.DocCount)
- }
- if v, ok := aggs["from"]; ok && v != nil {
- json.Unmarshal(*v, &a.From)
- }
- if v, ok := aggs["from_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.FromAsString)
- }
- if v, ok := aggs["to"]; ok && v != nil {
- json.Unmarshal(*v, &a.To)
- }
- if v, ok := aggs["to_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.ToAsString)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Bucket key items --
-
-// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned
-// with a terms aggregation.
-type AggregationBucketKeyItems struct {
- Aggregations
-
- DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"`
- SumOfOtherDocCount int64 //`json:"sum_other_doc_count"`
- Buckets []*AggregationBucketKeyItem //`json:"buckets"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure.
-func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
- json.Unmarshal(*v, &a.DocCountErrorUpperBound)
- }
- if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
- json.Unmarshal(*v, &a.SumOfOtherDocCount)
- }
- if v, ok := aggs["buckets"]; ok && v != nil {
- json.Unmarshal(*v, &a.Buckets)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure.
-type AggregationBucketKeyItem struct {
- Aggregations
-
- Key interface{} //`json:"key"`
- KeyAsString *string //`json:"key_as_string"`
- KeyNumber json.Number
- DocCount int64 //`json:"doc_count"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure.
-func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- dec := json.NewDecoder(bytes.NewReader(data))
- dec.UseNumber()
- if err := dec.Decode(&aggs); err != nil {
- return err
- }
- if v, ok := aggs["key"]; ok && v != nil {
- json.Unmarshal(*v, &a.Key)
- json.Unmarshal(*v, &a.KeyNumber)
- }
- if v, ok := aggs["key_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.KeyAsString)
- }
- if v, ok := aggs["doc_count"]; ok && v != nil {
- json.Unmarshal(*v, &a.DocCount)
- }
- a.Aggregations = aggs
- return nil
-}
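The decoder above deliberately calls UseNumber so that numeric bucket keys survive as json.Number in KeyNumber rather than being forced through float64. A short sketch of reading a numeric key without precision loss:

package example

import elastic "github.com/olivere/elastic"

// bucketKeyAsInt returns the bucket key as an int64. KeyNumber holds the
// raw JSON number literal, so large integer keys keep their exact value.
func bucketKeyAsInt(b *elastic.AggregationBucketKeyItem) (int64, error) {
	return b.KeyNumber.Int64()
}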
-
-// -- Bucket types for significant terms --
-
-// AggregationBucketSignificantTerms is a bucket aggregation returned
-// with a significant terms aggregation.
-type AggregationBucketSignificantTerms struct {
- Aggregations
-
- DocCount int64 //`json:"doc_count"`
- Buckets []*AggregationBucketSignificantTerm //`json:"buckets"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure.
-func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["doc_count"]; ok && v != nil {
- json.Unmarshal(*v, &a.DocCount)
- }
- if v, ok := aggs["buckets"]; ok && v != nil {
- json.Unmarshal(*v, &a.Buckets)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure.
-type AggregationBucketSignificantTerm struct {
- Aggregations
-
- Key string //`json:"key"`
- DocCount int64 //`json:"doc_count"`
- BgCount int64 //`json:"bg_count"`
- Score float64 //`json:"score"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure.
-func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["key"]; ok && v != nil {
- json.Unmarshal(*v, &a.Key)
- }
- if v, ok := aggs["doc_count"]; ok && v != nil {
- json.Unmarshal(*v, &a.DocCount)
- }
- if v, ok := aggs["bg_count"]; ok && v != nil {
- json.Unmarshal(*v, &a.BgCount)
- }
- if v, ok := aggs["score"]; ok && v != nil {
- json.Unmarshal(*v, &a.Score)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Bucket filters --
-
-// AggregationBucketFilters is a multi-bucket aggregation that is returned
-// with a filters aggregation.
-type AggregationBucketFilters struct {
- Aggregations
-
- Buckets []*AggregationBucketKeyItem //`json:"buckets"`
- NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure.
-func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["buckets"]; ok && v != nil {
- json.Unmarshal(*v, &a.Buckets)
- json.Unmarshal(*v, &a.NamedBuckets)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Bucket histogram items --
-
-// AggregationBucketHistogramItems is a bucket aggregation that is returned
-// with a histogram or date histogram aggregation.
-type AggregationBucketHistogramItems struct {
- Aggregations
-
- Buckets []*AggregationBucketHistogramItem //`json:"buckets"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure.
-func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["buckets"]; ok && v != nil {
- json.Unmarshal(*v, &a.Buckets)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure.
-type AggregationBucketHistogramItem struct {
- Aggregations
-
- Key float64 //`json:"key"`
- KeyAsString *string //`json:"key_as_string"`
- DocCount int64 //`json:"doc_count"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure.
-func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["key"]; ok && v != nil {
- json.Unmarshal(*v, &a.Key)
- }
- if v, ok := aggs["key_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.KeyAsString)
- }
- if v, ok := aggs["doc_count"]; ok && v != nil {
- json.Unmarshal(*v, &a.DocCount)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Pipeline simple value --
-
-// AggregationPipelineSimpleValue is a simple value, returned e.g. by a
-// MovAvg aggregation.
-type AggregationPipelineSimpleValue struct {
- Aggregations
-
- Value *float64 // `json:"value"`
- ValueAsString string // `json:"value_as_string"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineSimpleValue structure.
-func (a *AggregationPipelineSimpleValue) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["value"]; ok && v != nil {
- json.Unmarshal(*v, &a.Value)
- }
- if v, ok := aggs["value_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.ValueAsString)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Pipeline bucket metric value --
-
-// AggregationPipelineBucketMetricValue is a value returned e.g. by a
-// MaxBucket aggregation.
-type AggregationPipelineBucketMetricValue struct {
- Aggregations
-
- Keys []interface{} // `json:"keys"`
- Value *float64 // `json:"value"`
- ValueAsString string // `json:"value_as_string"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineBucketMetricValue structure.
-func (a *AggregationPipelineBucketMetricValue) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["keys"]; ok && v != nil {
- json.Unmarshal(*v, &a.Keys)
- }
- if v, ok := aggs["value"]; ok && v != nil {
- json.Unmarshal(*v, &a.Value)
- }
- if v, ok := aggs["value_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.ValueAsString)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Pipeline derivative --
-
-// AggregationPipelineDerivative is the value returned by a
-// Derivative aggregation.
-type AggregationPipelineDerivative struct {
- Aggregations
-
- Value *float64 // `json:"value"`
- ValueAsString string // `json:"value_as_string"`
- NormalizedValue *float64 // `json:"normalized_value"`
- NormalizedValueAsString string // `json:"normalized_value_as_string"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineDerivative structure.
-func (a *AggregationPipelineDerivative) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["value"]; ok && v != nil {
- json.Unmarshal(*v, &a.Value)
- }
- if v, ok := aggs["value_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.ValueAsString)
- }
- if v, ok := aggs["normalized_value"]; ok && v != nil {
- json.Unmarshal(*v, &a.NormalizedValue)
- }
- if v, ok := aggs["normalized_value_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.NormalizedValueAsString)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Pipeline stats metric --
-
-// AggregationPipelineStatsMetric is a multi-value metric, returned e.g. by a
-// StatsBucket pipeline aggregation.
-type AggregationPipelineStatsMetric struct {
- Aggregations
-
- Count int64 // `json:"count"`
- CountAsString string // `json:"count_as_string"`
- Min *float64 // `json:"min"`
- MinAsString string // `json:"min_as_string"`
- Max *float64 // `json:"max"`
- MaxAsString string // `json:"max_as_string"`
- Avg *float64 // `json:"avg"`
- AvgAsString string // `json:"avg_as_string"`
- Sum *float64 // `json:"sum"`
- SumAsString string // `json:"sum_as_string"`
-
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineStatsMetric structure.
-func (a *AggregationPipelineStatsMetric) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["count"]; ok && v != nil {
- json.Unmarshal(*v, &a.Count)
- }
- if v, ok := aggs["count_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.CountAsString)
- }
- if v, ok := aggs["min"]; ok && v != nil {
- json.Unmarshal(*v, &a.Min)
- }
- if v, ok := aggs["min_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.MinAsString)
- }
- if v, ok := aggs["max"]; ok && v != nil {
- json.Unmarshal(*v, &a.Max)
- }
- if v, ok := aggs["max_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.MaxAsString)
- }
- if v, ok := aggs["avg"]; ok && v != nil {
- json.Unmarshal(*v, &a.Avg)
- }
- if v, ok := aggs["avg_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.AvgAsString)
- }
- if v, ok := aggs["sum"]; ok && v != nil {
- json.Unmarshal(*v, &a.Sum)
- }
- if v, ok := aggs["sum_as_string"]; ok && v != nil {
- json.Unmarshal(*v, &a.SumAsString)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Pipeline percentiles --
-
-// AggregationPipelinePercentilesMetric is the value returned by a pipeline
-// percentiles metric aggregation, e.g. PercentilesBucket.
-type AggregationPipelinePercentilesMetric struct {
- Aggregations
-
- Values map[string]float64 // `json:"values"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationPipelinePercentilesMetric structure.
-func (a *AggregationPipelinePercentilesMetric) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["values"]; ok && v != nil {
- json.Unmarshal(*v, &a.Values)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// -- Composite key items --
-
-// AggregationBucketCompositeItems implements the response structure
-// for a bucket aggregation of type composite.
-type AggregationBucketCompositeItems struct {
- Aggregations
-
- Buckets []*AggregationBucketCompositeItem //`json:"buckets"`
- Meta map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationBucketCompositeItems structure.
-func (a *AggregationBucketCompositeItems) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- if err := json.Unmarshal(data, &aggs); err != nil {
- return err
- }
- if v, ok := aggs["buckets"]; ok && v != nil {
- json.Unmarshal(*v, &a.Buckets)
- }
- if v, ok := aggs["meta"]; ok && v != nil {
- json.Unmarshal(*v, &a.Meta)
- }
- a.Aggregations = aggs
- return nil
-}
-
-// AggregationBucketCompositeItem is a single bucket of an AggregationBucketCompositeItems structure.
-type AggregationBucketCompositeItem struct {
- Aggregations
-
- Key map[string]interface{} //`json:"key"`
- DocCount int64 //`json:"doc_count"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationBucketCompositeItem structure.
-func (a *AggregationBucketCompositeItem) UnmarshalJSON(data []byte) error {
- var aggs map[string]*json.RawMessage
- dec := json.NewDecoder(bytes.NewReader(data))
- dec.UseNumber()
- if err := dec.Decode(&aggs); err != nil {
- return err
- }
- if v, ok := aggs["key"]; ok && v != nil {
- json.Unmarshal(*v, &a.Key)
- }
- if v, ok := aggs["doc_count"]; ok && v != nil {
- json.Unmarshal(*v, &a.DocCount)
- }
- a.Aggregations = aggs
- return nil
-}
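Because every decoded bucket and metric embeds Aggregations (the raw map built in these UnmarshalJSON methods), sub-aggregation results can be read from a bucket with the same named accessors used at the top level. A minimal sketch, assuming a terms aggregation "by_user" with an avg sub-aggregation "avg_score"; both names are placeholders.

package example

import elastic "github.com/olivere/elastic"

// avgScoreByUser reads an avg sub-aggregation from each terms bucket.
// The embedded Aggregations map on the bucket holds the raw JSON of its
// sub-aggregations, so bucket.Avg works exactly like res.Aggregations.Avg.
func avgScoreByUser(res *elastic.SearchResult) map[string]float64 {
	out := make(map[string]float64)
	terms, found := res.Aggregations.Terms("by_user")
	if !found {
		return out
	}
	for _, bucket := range terms.Buckets {
		user, ok := bucket.Key.(string)
		if !ok {
			continue
		}
		if avg, found := bucket.Avg("avg_score"); found && avg.Value != nil {
			out[user] = *avg.Value
		}
	}
	return out
}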
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_children.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_children.go
deleted file mode 100644
index 08623a58e..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_children.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// ChildrenAggregation is a special single bucket aggregation that enables
-// aggregating from buckets on parent document types to buckets on child documents.
-// It is available from 1.4.0.Beta1 upwards.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-children-aggregation.html
-type ChildrenAggregation struct {
- typ string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewChildrenAggregation() *ChildrenAggregation {
- return &ChildrenAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *ChildrenAggregation) Type(typ string) *ChildrenAggregation {
- a.typ = typ
- return a
-}
-
-func (a *ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) *ChildrenAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *ChildrenAggregation) Meta(metaData map[string]interface{}) *ChildrenAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *ChildrenAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "to-answers" : {
- // "children": {
- // "type" : "answer"
- // }
- // }
- // }
- // }
-	// This method returns only the { "children" : { "type" : ... } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["children"] = opts
- opts["type"] = a.typ
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
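For orientation, a hedged sketch of wiring this aggregation into a search request. The index name, parent/child mapping, aggregation names, and field names are assumptions, and client setup is elided.

package example

import (
	"context"

	elastic "github.com/olivere/elastic"
)

// topAnswerers aggregates from parent "question" documents into their
// child "answer" documents and counts answers per display name.
func topAnswerers(ctx context.Context, client *elastic.Client) (*elastic.SearchResult, error) {
	agg := elastic.NewChildrenAggregation().
		Type("answer").
		SubAggregation("top-names", elastic.NewTermsAggregation().Field("owner.display_name").Size(10))
	return client.Search().
		Index("stackoverflow").
		Size(0).
		Aggregation("to-answers", agg).
		Do(ctx)
}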
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_children_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_children_test.go
deleted file mode 100644
index 0486079a9..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_children_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestChildrenAggregation(t *testing.T) {
- agg := NewChildrenAggregation().Type("answer")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"children":{"type":"answer"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestChildrenAggregationWithSubAggregation(t *testing.T) {
- subAgg := NewTermsAggregation().Field("owner.display_name").Size(10)
- agg := NewChildrenAggregation().Type("answer")
- agg = agg.SubAggregation("top-names", subAgg)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"aggregations":{"top-names":{"terms":{"field":"owner.display_name","size":10}}},"children":{"type":"answer"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_composite.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_composite.go
deleted file mode 100644
index 1d9132d2d..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_composite.go
+++ /dev/null
@@ -1,498 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// CompositeAggregation is a multi-bucket values source based aggregation
-// that can be used to calculate unique composite values from source documents.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html
-// for details.
-type CompositeAggregation struct {
- after map[string]interface{}
- size *int
- sources []CompositeAggregationValuesSource
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-// NewCompositeAggregation creates a new CompositeAggregation.
-func NewCompositeAggregation() *CompositeAggregation {
- return &CompositeAggregation{
- sources: make([]CompositeAggregationValuesSource, 0),
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-// Size represents the number of composite buckets to return.
-// Defaults to 10 as of Elasticsearch 6.1.
-func (a *CompositeAggregation) Size(size int) *CompositeAggregation {
- a.size = &size
- return a
-}
-
-// AggregateAfter sets the values that indicate which composite bucket this
-// request should "aggregate after".
-func (a *CompositeAggregation) AggregateAfter(after map[string]interface{}) *CompositeAggregation {
- a.after = after
- return a
-}
-
-// Sources specifies the list of CompositeAggregationValuesSource instances to
-// use in the aggregation.
-func (a *CompositeAggregation) Sources(sources ...CompositeAggregationValuesSource) *CompositeAggregation {
- a.sources = append(a.sources, sources...)
- return a
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (a *CompositeAggregation) SubAggregation(name string, subAggregation Aggregation) *CompositeAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *CompositeAggregation) Meta(metaData map[string]interface{}) *CompositeAggregation {
- a.meta = metaData
- return a
-}
-
-// Source returns the serializable JSON for this aggregation.
-func (a *CompositeAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "my_composite_agg" : {
- // "composite" : {
- // "sources": [
- // {"my_term": { "terms": { "field": "product" }}},
- // {"my_histo": { "histogram": { "field": "price", "interval": 5 }}},
- // {"my_date": { "date_histogram": { "field": "timestamp", "interval": "1d" }}},
- // ],
- // "size" : 10,
- // "after" : ["a", 2, "c"]
- // }
- // }
- // }
- // }
- //
- // This method returns only the { "histogram" : { ... } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["composite"] = opts
-
- sources := make([]interface{}, len(a.sources))
- for i, s := range a.sources {
- src, err := s.Source()
- if err != nil {
- return nil, err
- }
- sources[i] = src
- }
- opts["sources"] = sources
-
- if a.size != nil {
- opts["size"] = *a.size
- }
-
- if a.after != nil {
- opts["after"] = a.after
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
-
-// -- Generic interface for CompositeAggregationValues --
-
-// CompositeAggregationValuesSource specifies the interface that
-// all implementations for CompositeAggregation's Sources method
-// need to implement.
-//
-// The different implementations are described in
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html#_values_source_2.
-type CompositeAggregationValuesSource interface {
- Source() (interface{}, error)
-}
-
-// -- CompositeAggregationTermsValuesSource --
-
-// CompositeAggregationTermsValuesSource is a source for the CompositeAggregation that handles terms.
-// It works very similarly to a terms aggregation, with slightly different syntax.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html#_terms
-// for details.
-type CompositeAggregationTermsValuesSource struct {
- name string
- field string
- script *Script
- valueType string
- missing interface{}
- order string
-}
-
-// NewCompositeAggregationTermsValuesSource creates and initializes
-// a new CompositeAggregationTermsValuesSource.
-func NewCompositeAggregationTermsValuesSource(name string) *CompositeAggregationTermsValuesSource {
- return &CompositeAggregationTermsValuesSource{
- name: name,
- }
-}
-
-// Field to use for this source.
-func (a *CompositeAggregationTermsValuesSource) Field(field string) *CompositeAggregationTermsValuesSource {
- a.field = field
- return a
-}
-
-// Script to use for this source.
-func (a *CompositeAggregationTermsValuesSource) Script(script *Script) *CompositeAggregationTermsValuesSource {
- a.script = script
- return a
-}
-
-// ValueType specifies the type of values produced by this source,
-// e.g. "string" or "date".
-func (a *CompositeAggregationTermsValuesSource) ValueType(valueType string) *CompositeAggregationTermsValuesSource {
- a.valueType = valueType
- return a
-}
-
-// Order specifies the order of the values produced by this source.
-// It can be either "asc" or "desc".
-func (a *CompositeAggregationTermsValuesSource) Order(order string) *CompositeAggregationTermsValuesSource {
- a.order = order
- return a
-}
-
-// Asc ensures the order of the values produced is ascending.
-func (a *CompositeAggregationTermsValuesSource) Asc() *CompositeAggregationTermsValuesSource {
- a.order = "asc"
- return a
-}
-
-// Desc ensures the order of the values produced is descending.
-func (a *CompositeAggregationTermsValuesSource) Desc() *CompositeAggregationTermsValuesSource {
- a.order = "desc"
- return a
-}
-
-// Missing specifies the value to use when the source finds a missing
-// value in a document.
-func (a *CompositeAggregationTermsValuesSource) Missing(missing interface{}) *CompositeAggregationTermsValuesSource {
- a.missing = missing
- return a
-}
-
-// Source returns the serializable JSON for this values source.
-func (a *CompositeAggregationTermsValuesSource) Source() (interface{}, error) {
- source := make(map[string]interface{})
- name := make(map[string]interface{})
- source[a.name] = name
- values := make(map[string]interface{})
- name["terms"] = values
-
- // field
- if a.field != "" {
- values["field"] = a.field
- }
-
- // script
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- values["script"] = src
- }
-
- // missing
- if a.missing != nil {
- values["missing"] = a.missing
- }
-
- // value_type
- if a.valueType != "" {
- values["value_type"] = a.valueType
- }
-
- // order
- if a.order != "" {
- values["order"] = a.order
- }
-
- return source, nil
-
-}
-
-// -- CompositeAggregationHistogramValuesSource --
-
-// CompositeAggregationHistogramValuesSource is a source for the CompositeAggregation that handles histograms.
-// It works very similarly to a histogram aggregation, with slightly different syntax.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html#_histogram
-// for details.
-type CompositeAggregationHistogramValuesSource struct {
- name string
- field string
- script *Script
- valueType string
- missing interface{}
- order string
- interval float64
-}
-
-// NewCompositeAggregationHistogramValuesSource creates and initializes
-// a new CompositeAggregationHistogramValuesSource.
-func NewCompositeAggregationHistogramValuesSource(name string, interval float64) *CompositeAggregationHistogramValuesSource {
- return &CompositeAggregationHistogramValuesSource{
- name: name,
- interval: interval,
- }
-}
-
-// Field to use for this source.
-func (a *CompositeAggregationHistogramValuesSource) Field(field string) *CompositeAggregationHistogramValuesSource {
- a.field = field
- return a
-}
-
-// Script to use for this source.
-func (a *CompositeAggregationHistogramValuesSource) Script(script *Script) *CompositeAggregationHistogramValuesSource {
- a.script = script
- return a
-}
-
-// ValueType specifies the type of values produced by this source,
-// e.g. "string" or "date".
-func (a *CompositeAggregationHistogramValuesSource) ValueType(valueType string) *CompositeAggregationHistogramValuesSource {
- a.valueType = valueType
- return a
-}
-
-// Missing specifies the value to use when the source finds a missing
-// value in a document.
-func (a *CompositeAggregationHistogramValuesSource) Missing(missing interface{}) *CompositeAggregationHistogramValuesSource {
- a.missing = missing
- return a
-}
-
-// Order specifies the order of the values produced by this source.
-// It can be either "asc" or "desc".
-func (a *CompositeAggregationHistogramValuesSource) Order(order string) *CompositeAggregationHistogramValuesSource {
- a.order = order
- return a
-}
-
-// Asc ensures the order of the values produced is ascending.
-func (a *CompositeAggregationHistogramValuesSource) Asc() *CompositeAggregationHistogramValuesSource {
- a.order = "asc"
- return a
-}
-
-// Desc ensures the order of the values produced is descending.
-func (a *CompositeAggregationHistogramValuesSource) Desc() *CompositeAggregationHistogramValuesSource {
- a.order = "desc"
- return a
-}
-
-// Interval specifies the interval to use.
-func (a *CompositeAggregationHistogramValuesSource) Interval(interval float64) *CompositeAggregationHistogramValuesSource {
- a.interval = interval
- return a
-}
-
-// Source returns the serializable JSON for this values source.
-func (a *CompositeAggregationHistogramValuesSource) Source() (interface{}, error) {
- source := make(map[string]interface{})
- name := make(map[string]interface{})
- source[a.name] = name
- values := make(map[string]interface{})
- name["histogram"] = values
-
- // field
- if a.field != "" {
- values["field"] = a.field
- }
-
- // script
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- values["script"] = src
- }
-
- // missing
- if a.missing != nil {
- values["missing"] = a.missing
- }
-
- // value_type
- if a.valueType != "" {
- values["value_type"] = a.valueType
- }
-
- // order
- if a.order != "" {
- values["order"] = a.order
- }
-
- // Histogram-related properties
- values["interval"] = a.interval
-
- return source, nil
-
-}
-
-// -- CompositeAggregationDateHistogramValuesSource --
-
-// CompositeAggregationDateHistogramValuesSource is a source for the CompositeAggregation that handles date histograms.
-// It works very similarly to a date histogram aggregation, with slightly different syntax.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-aggregations-bucket-composite-aggregation.html#_date_histogram
-// for details.
-type CompositeAggregationDateHistogramValuesSource struct {
- name string
- field string
- script *Script
- valueType string
- missing interface{}
- order string
- interval interface{}
- timeZone string
-}
-
-// NewCompositeAggregationDateHistogramValuesSource creates and initializes
-// a new CompositeAggregationDateHistogramValuesSource.
-func NewCompositeAggregationDateHistogramValuesSource(name string, interval interface{}) *CompositeAggregationDateHistogramValuesSource {
- return &CompositeAggregationDateHistogramValuesSource{
- name: name,
- interval: interval,
- }
-}
-
-// Field to use for this source.
-func (a *CompositeAggregationDateHistogramValuesSource) Field(field string) *CompositeAggregationDateHistogramValuesSource {
- a.field = field
- return a
-}
-
-// Script to use for this source.
-func (a *CompositeAggregationDateHistogramValuesSource) Script(script *Script) *CompositeAggregationDateHistogramValuesSource {
- a.script = script
- return a
-}
-
-// ValueType specifies the type of values produced by this source,
-// e.g. "string" or "date".
-func (a *CompositeAggregationDateHistogramValuesSource) ValueType(valueType string) *CompositeAggregationDateHistogramValuesSource {
- a.valueType = valueType
- return a
-}
-
-// Missing specifies the value to use when the source finds a missing
-// value in a document.
-func (a *CompositeAggregationDateHistogramValuesSource) Missing(missing interface{}) *CompositeAggregationDateHistogramValuesSource {
- a.missing = missing
- return a
-}
-
-// Order specifies the order of the values produced by this source.
-// It can be either "asc" or "desc".
-func (a *CompositeAggregationDateHistogramValuesSource) Order(order string) *CompositeAggregationDateHistogramValuesSource {
- a.order = order
- return a
-}
-
-// Asc ensures the order of the values produced is ascending.
-func (a *CompositeAggregationDateHistogramValuesSource) Asc() *CompositeAggregationDateHistogramValuesSource {
- a.order = "asc"
- return a
-}
-
-// Desc ensures the order of the values produced is descending.
-func (a *CompositeAggregationDateHistogramValuesSource) Desc() *CompositeAggregationDateHistogramValuesSource {
- a.order = "desc"
- return a
-}
-
-// Interval to use for the date histogram, e.g. "1d", or a numeric value such as 60.
-func (a *CompositeAggregationDateHistogramValuesSource) Interval(interval interface{}) *CompositeAggregationDateHistogramValuesSource {
- a.interval = interval
- return a
-}
-
-// TimeZone to use for the dates.
-func (a *CompositeAggregationDateHistogramValuesSource) TimeZone(timeZone string) *CompositeAggregationDateHistogramValuesSource {
- a.timeZone = timeZone
- return a
-}
-
-// Source returns the serializable JSON for this values source.
-func (a *CompositeAggregationDateHistogramValuesSource) Source() (interface{}, error) {
- source := make(map[string]interface{})
- name := make(map[string]interface{})
- source[a.name] = name
- values := make(map[string]interface{})
- name["date_histogram"] = values
-
- // field
- if a.field != "" {
- values["field"] = a.field
- }
-
- // script
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- values["script"] = src
- }
-
- // missing
- if a.missing != nil {
- values["missing"] = a.missing
- }
-
- // value_type
- if a.valueType != "" {
- values["value_type"] = a.valueType
- }
-
- // order
- if a.order != "" {
- values["order"] = a.order
- }
-
- // DateHistogram-related properties
- values["interval"] = a.interval
-
- // timeZone
- if a.timeZone != "" {
- values["time_zone"] = a.timeZone
- }
-
- return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_composite_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_composite_test.go
deleted file mode 100644
index 91d84dbdb..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_composite_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestCompositeAggregation(t *testing.T) {
- agg := NewCompositeAggregation().
- Sources(
- NewCompositeAggregationTermsValuesSource("my_terms").Field("a_term").Missing("N/A").Order("asc"),
- NewCompositeAggregationHistogramValuesSource("my_histogram", 5).Field("price").Asc(),
- NewCompositeAggregationDateHistogramValuesSource("my_date_histogram", "1d").Field("purchase_date").Desc(),
- ).
- Size(10).
- AggregateAfter(map[string]interface{}{
- "my_terms": "1",
- "my_histogram": 2,
- "my_date_histogram": "3",
- })
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"composite":{"after":{"my_date_histogram":"3","my_histogram":2,"my_terms":"1"},"size":10,"sources":[{"my_terms":{"terms":{"field":"a_term","missing":"N/A","order":"asc"}}},{"my_histogram":{"histogram":{"field":"price","interval":5,"order":"asc"}}},{"my_date_histogram":{"date_histogram":{"field":"purchase_date","interval":"1d","order":"desc"}}}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCompositeAggregationTermsValuesSource(t *testing.T) {
- in := NewCompositeAggregationTermsValuesSource("products").
- Script(NewScript("doc['product'].value").Lang("painless"))
- src, err := in.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"products":{"terms":{"script":{"lang":"painless","source":"doc['product'].value"}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCompositeAggregationHistogramValuesSource(t *testing.T) {
- in := NewCompositeAggregationHistogramValuesSource("histo", 5).
- Field("price")
- src, err := in.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"histo":{"histogram":{"field":"price","interval":5}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCompositeAggregationDateHistogramValuesSource(t *testing.T) {
- in := NewCompositeAggregationDateHistogramValuesSource("date", "1d").
- Field("timestamp")
- src, err := in.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"date":{"date_histogram":{"field":"timestamp","interval":"1d"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_count_thresholds.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_count_thresholds.go
deleted file mode 100644
index 53efdaf5f..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_count_thresholds.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// BucketCountThresholds is used in e.g. terms and significant text aggregations.
-type BucketCountThresholds struct {
- MinDocCount *int64
- ShardMinDocCount *int64
- RequiredSize *int
- ShardSize *int
-}
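BucketCountThresholds uses pointer fields so that an unset threshold can be told apart from an explicit zero. A small sketch of populating it; how the struct is consumed is not part of this file, so this only illustrates the pointer-field pattern:

package main

import (
    "fmt"

    "github.com/olivere/elastic"
)

func main() {
    minDocCount := int64(10)
    shardSize := 500
    t := elastic.BucketCountThresholds{
        MinDocCount: &minDocCount, // nil would mean "use the server-side default"
        ShardSize:   &shardSize,
    }
    fmt.Println(*t.MinDocCount, *t.ShardSize)
}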
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go
deleted file mode 100644
index 1e7a1246c..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// DateHistogramAggregation is a multi-bucket aggregation similar to the
-// histogram except it can only be applied on date values.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-datehistogram-aggregation.html
-type DateHistogramAggregation struct {
- field string
- script *Script
- missing interface{}
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-
- interval string
- order string
- orderAsc bool
- minDocCount *int64
- extendedBoundsMin interface{}
- extendedBoundsMax interface{}
- timeZone string
- format string
- offset string
-}
-
-// NewDateHistogramAggregation creates a new DateHistogramAggregation.
-func NewDateHistogramAggregation() *DateHistogramAggregation {
- return &DateHistogramAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-// Field on which the aggregation is processed.
-func (a *DateHistogramAggregation) Field(field string) *DateHistogramAggregation {
- a.field = field
- return a
-}
-
-func (a *DateHistogramAggregation) Script(script *Script) *DateHistogramAggregation {
- a.script = script
- return a
-}
-
-// Missing configures the value to use when documents miss a value.
-func (a *DateHistogramAggregation) Missing(missing interface{}) *DateHistogramAggregation {
- a.missing = missing
- return a
-}
-
-func (a *DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *DateHistogramAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *DateHistogramAggregation) Meta(metaData map[string]interface{}) *DateHistogramAggregation {
- a.meta = metaData
- return a
-}
-
-// Interval by which the aggregation gets processed.
-// Allowed values are: "year", "quarter", "month", "week", "day",
-// "hour", "minute". It also supports time settings like "1.5h"
-// (up to "w" for weeks).
-func (a *DateHistogramAggregation) Interval(interval string) *DateHistogramAggregation {
- a.interval = interval
- return a
-}
-
-// Order specifies the sort order. Valid values for order are:
-// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
-// with a metric.
-func (a *DateHistogramAggregation) Order(order string, asc bool) *DateHistogramAggregation {
- a.order = order
- a.orderAsc = asc
- return a
-}
-
-func (a *DateHistogramAggregation) OrderByCount(asc bool) *DateHistogramAggregation {
- // "order" : { "_count" : "asc" }
- a.order = "_count"
- a.orderAsc = asc
- return a
-}
-
-func (a *DateHistogramAggregation) OrderByCountAsc() *DateHistogramAggregation {
- return a.OrderByCount(true)
-}
-
-func (a *DateHistogramAggregation) OrderByCountDesc() *DateHistogramAggregation {
- return a.OrderByCount(false)
-}
-
-func (a *DateHistogramAggregation) OrderByKey(asc bool) *DateHistogramAggregation {
- // "order" : { "_key" : "asc" }
- a.order = "_key"
- a.orderAsc = asc
- return a
-}
-
-func (a *DateHistogramAggregation) OrderByKeyAsc() *DateHistogramAggregation {
- return a.OrderByKey(true)
-}
-
-func (a *DateHistogramAggregation) OrderByKeyDesc() *DateHistogramAggregation {
- return a.OrderByKey(false)
-}
-
-// OrderByAggregation creates a bucket ordering strategy which sorts buckets
-// based on a single-valued calc get.
-func (a *DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) *DateHistogramAggregation {
- // {
- // "aggs" : {
- // "genders" : {
- // "terms" : {
- // "field" : "gender",
- // "order" : { "avg_height" : "desc" }
- // },
- // "aggs" : {
- // "avg_height" : { "avg" : { "field" : "height" } }
- // }
- // }
- // }
- // }
- a.order = aggName
- a.orderAsc = asc
- return a
-}
-
-// OrderByAggregationAndMetric creates a bucket ordering strategy which
-// sorts buckets based on a multi-valued calc get.
-func (a *DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *DateHistogramAggregation {
- // {
- // "aggs" : {
- // "genders" : {
- // "terms" : {
- // "field" : "gender",
- // "order" : { "height_stats.avg" : "desc" }
- // },
- // "aggs" : {
- // "height_stats" : { "stats" : { "field" : "height" } }
- // }
- // }
- // }
- // }
- a.order = aggName + "." + metric
- a.orderAsc = asc
- return a
-}
-
-// MinDocCount sets the minimum document count per bucket.
-// Buckets with fewer documents than this min value will not be returned.
-func (a *DateHistogramAggregation) MinDocCount(minDocCount int64) *DateHistogramAggregation {
- a.minDocCount = &minDocCount
- return a
-}
-
-// TimeZone sets the timezone in which to translate dates before computing buckets.
-func (a *DateHistogramAggregation) TimeZone(timeZone string) *DateHistogramAggregation {
- a.timeZone = timeZone
- return a
-}
-
-// Format sets the format to use for dates.
-func (a *DateHistogramAggregation) Format(format string) *DateHistogramAggregation {
- a.format = format
- return a
-}
-
-// Offset sets the offset of time intervals in the histogram, e.g. "+6h".
-func (a *DateHistogramAggregation) Offset(offset string) *DateHistogramAggregation {
- a.offset = offset
- return a
-}
-
-// ExtendedBounds accepts int, int64, string, or time.Time values.
-// In case the lower value in the histogram would be greater than min or the
-// upper value would be less than max, empty buckets will be generated.
-func (a *DateHistogramAggregation) ExtendedBounds(min, max interface{}) *DateHistogramAggregation {
- a.extendedBoundsMin = min
- a.extendedBoundsMax = max
- return a
-}
-
-// ExtendedBoundsMin accepts int, int64, string, or time.Time values.
-func (a *DateHistogramAggregation) ExtendedBoundsMin(min interface{}) *DateHistogramAggregation {
- a.extendedBoundsMin = min
- return a
-}
-
-// ExtendedBoundsMax accepts int, int64, string, or time.Time values.
-func (a *DateHistogramAggregation) ExtendedBoundsMax(max interface{}) *DateHistogramAggregation {
- a.extendedBoundsMax = max
- return a
-}
-
-func (a *DateHistogramAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "articles_over_time" : {
- // "date_histogram" : {
- // "field" : "date",
- // "interval" : "month"
- // }
- // }
- // }
- // }
- //
- // This method returns only the { "date_histogram" : { ... } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["date_histogram"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.missing != nil {
- opts["missing"] = a.missing
- }
-
- opts["interval"] = a.interval
- if a.minDocCount != nil {
- opts["min_doc_count"] = *a.minDocCount
- }
- if a.order != "" {
- o := make(map[string]interface{})
- if a.orderAsc {
- o[a.order] = "asc"
- } else {
- o[a.order] = "desc"
- }
- opts["order"] = o
- }
- if a.timeZone != "" {
- opts["time_zone"] = a.timeZone
- }
- if a.offset != "" {
- opts["offset"] = a.offset
- }
- if a.format != "" {
- opts["format"] = a.format
- }
- if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil {
- bounds := make(map[string]interface{})
- if a.extendedBoundsMin != nil {
- bounds["min"] = a.extendedBoundsMin
- }
- if a.extendedBoundsMax != nil {
- bounds["max"] = a.extendedBoundsMax
- }
- opts["extended_bounds"] = bounds
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
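A minimal sketch of the DateHistogramAggregation builder defined above, combining an interval, an ordering, and a sub-aggregation; the "date" and "price" field names are illustrative:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "github.com/olivere/elastic"
)

func main() {
    // Monthly buckets ordered by document count, each carrying an average price.
    agg := elastic.NewDateHistogramAggregation().
        Field("date").
        Interval("month").
        MinDocCount(1).
        OrderByCountDesc().
        SubAggregation("avg_price", elastic.NewAvgAggregation().Field("price"))

    src, err := agg.Source() // only the { "date_histogram": { ... } } part
    if err != nil {
        log.Fatal(err)
    }
    out, _ := json.Marshal(src)
    fmt.Println(string(out))
}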
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go
deleted file mode 100644
index ddf790834..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestDateHistogramAggregation(t *testing.T) {
- agg := NewDateHistogramAggregation().
- Field("date").
- Interval("month").
- Format("YYYY-MM").
- TimeZone("UTC").
- Offset("+6h")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"date_histogram":{"field":"date","format":"YYYY-MM","interval":"month","offset":"+6h","time_zone":"UTC"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestDateHistogramAggregationWithMissing(t *testing.T) {
- agg := NewDateHistogramAggregation().Field("date").Interval("year").Missing("1900")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"date_histogram":{"field":"date","interval":"year","missing":"1900"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range.go
deleted file mode 100644
index 714fd3e11..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range.go
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "time"
-)
-
-// DateRangeAggregation is a range aggregation that is dedicated for
-// date values. The main difference between this aggregation and the
-// normal range aggregation is that the from and to values can be expressed
-// in Date Math expressions, and it is also possible to specify a
-// date format by which the from and to response fields will be returned.
-// Note that this aggregation includes the from value and excludes the to
-// value for each range.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-daterange-aggregation.html
-type DateRangeAggregation struct {
- field string
- script *Script
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- keyed *bool
- unmapped *bool
- timeZone string
- format string
- entries []DateRangeAggregationEntry
-}
-
-type DateRangeAggregationEntry struct {
- Key string
- From interface{}
- To interface{}
-}
-
-func NewDateRangeAggregation() *DateRangeAggregation {
- return &DateRangeAggregation{
- subAggregations: make(map[string]Aggregation),
- entries: make([]DateRangeAggregationEntry, 0),
- }
-}
-
-func (a *DateRangeAggregation) Field(field string) *DateRangeAggregation {
- a.field = field
- return a
-}
-
-func (a *DateRangeAggregation) Script(script *Script) *DateRangeAggregation {
- a.script = script
- return a
-}
-
-func (a *DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *DateRangeAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *DateRangeAggregation) Meta(metaData map[string]interface{}) *DateRangeAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *DateRangeAggregation) Keyed(keyed bool) *DateRangeAggregation {
- a.keyed = &keyed
- return a
-}
-
-func (a *DateRangeAggregation) Unmapped(unmapped bool) *DateRangeAggregation {
- a.unmapped = &unmapped
- return a
-}
-
-func (a *DateRangeAggregation) TimeZone(timeZone string) *DateRangeAggregation {
- a.timeZone = timeZone
- return a
-}
-
-func (a *DateRangeAggregation) Format(format string) *DateRangeAggregation {
- a.format = format
- return a
-}
-
-func (a *DateRangeAggregation) AddRange(from, to interface{}) *DateRangeAggregation {
- a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
- return a
-}
-
-func (a *DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) *DateRangeAggregation {
- a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
- return a
-}
-
-func (a *DateRangeAggregation) AddUnboundedTo(from interface{}) *DateRangeAggregation {
- a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
- return a
-}
-
-func (a *DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *DateRangeAggregation {
- a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
- return a
-}
-
-func (a *DateRangeAggregation) AddUnboundedFrom(to interface{}) *DateRangeAggregation {
- a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
- return a
-}
-
-func (a *DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *DateRangeAggregation {
- a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
- return a
-}
-
-func (a *DateRangeAggregation) Lt(to interface{}) *DateRangeAggregation {
- a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
- return a
-}
-
-func (a *DateRangeAggregation) LtWithKey(key string, to interface{}) *DateRangeAggregation {
- a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
- return a
-}
-
-func (a *DateRangeAggregation) Between(from, to interface{}) *DateRangeAggregation {
- a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
- return a
-}
-
-func (a *DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) *DateRangeAggregation {
- a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
- return a
-}
-
-func (a *DateRangeAggregation) Gt(from interface{}) *DateRangeAggregation {
- a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
- return a
-}
-
-func (a *DateRangeAggregation) GtWithKey(key string, from interface{}) *DateRangeAggregation {
- a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
- return a
-}
-
-func (a *DateRangeAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "range" : {
- // "date_range": {
- // "field": "date",
- // "format": "MM-yyy",
- // "ranges": [
- // { "to": "now-10M/M" },
- // { "from": "now-10M/M" }
- // ]
- // }
- // }
- // }
- // }
- //
- // This method returns only the { "date_range" : { ... } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["date_range"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
-
- if a.keyed != nil {
- opts["keyed"] = *a.keyed
- }
- if a.unmapped != nil {
- opts["unmapped"] = *a.unmapped
- }
- if a.timeZone != "" {
- opts["time_zone"] = a.timeZone
- }
- if a.format != "" {
- opts["format"] = a.format
- }
-
- var ranges []interface{}
- for _, ent := range a.entries {
- r := make(map[string]interface{})
- if ent.Key != "" {
- r["key"] = ent.Key
- }
- if ent.From != nil {
- switch from := ent.From.(type) {
- case int, int16, int32, int64, float32, float64:
- r["from"] = from
- case *int, *int16, *int32, *int64, *float32, *float64:
- r["from"] = from
- case time.Time:
- r["from"] = from.Format(time.RFC3339)
- case *time.Time:
- r["from"] = from.Format(time.RFC3339)
- case string:
- r["from"] = from
- case *string:
- r["from"] = from
- }
- }
- if ent.To != nil {
- switch to := ent.To.(type) {
- case int, int16, int32, int64, float32, float64:
- r["to"] = to
- case *int, *int16, *int32, *int64, *float32, *float64:
- r["to"] = to
- case time.Time:
- r["to"] = to.Format(time.RFC3339)
- case *time.Time:
- r["to"] = to.Format(time.RFC3339)
- case string:
- r["to"] = to
- case *string:
- r["to"] = to
- }
- }
- ranges = append(ranges, r)
- }
- opts["ranges"] = ranges
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
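A minimal sketch of the DateRangeAggregation builder above, using keyed ranges and date-math strings (which are passed through verbatim); the field name and bucket keys are illustrative:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "github.com/olivere/elastic"
)

func main() {
    // Remember: each range includes its "from" value and excludes its "to" value.
    agg := elastic.NewDateRangeAggregation().
        Field("created_at").
        Format("yyyy-MM-dd").
        Keyed(true).
        LtWithKey("old", "now-10y/y").
        BetweenWithKey("recent", "now-10y/y", "now-1y/y").
        GtWithKey("new", "now-1y/y")

    src, err := agg.Source() // only the { "date_range": { ... } } part
    if err != nil {
        log.Fatal(err)
    }
    out, _ := json.Marshal(src)
    fmt.Println(string(out))
}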
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go
deleted file mode 100644
index 89ed495f3..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestDateRangeAggregation(t *testing.T) {
- agg := NewDateRangeAggregation().Field("created_at").TimeZone("UTC")
- agg = agg.AddRange(nil, "2012-12-31")
- agg = agg.AddRange("2013-01-01", "2013-12-31")
- agg = agg.AddRange("2014-01-01", nil)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}],"time_zone":"UTC"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestDateRangeAggregationWithPointers(t *testing.T) {
- d1 := "2012-12-31"
- d2 := "2013-01-01"
- d3 := "2013-12-31"
- d4 := "2014-01-01"
-
- agg := NewDateRangeAggregation().Field("created_at")
- agg = agg.AddRange(nil, &d1)
- agg = agg.AddRange(d2, &d3)
- agg = agg.AddRange(d4, nil)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestDateRangeAggregationWithUnbounded(t *testing.T) {
- agg := NewDateRangeAggregation().Field("created_at").
- AddUnboundedFrom("2012-12-31").
- AddRange("2013-01-01", "2013-12-31").
- AddUnboundedTo("2014-01-01")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestDateRangeAggregationWithLtAndCo(t *testing.T) {
- agg := NewDateRangeAggregation().Field("created_at").
- Lt("2012-12-31").
- Between("2013-01-01", "2013-12-31").
- Gt("2014-01-01")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestDateRangeAggregationWithKeyedFlag(t *testing.T) {
- agg := NewDateRangeAggregation().Field("created_at").
- Keyed(true).
- Lt("2012-12-31").
- Between("2013-01-01", "2013-12-31").
- Gt("2014-01-01")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestDateRangeAggregationWithKeys(t *testing.T) {
- agg := NewDateRangeAggregation().Field("created_at").
- Keyed(true).
- LtWithKey("pre-2012", "2012-12-31").
- BetweenWithKey("2013", "2013-01-01", "2013-12-31").
- GtWithKey("post-2013", "2014-01-01")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"key":"pre-2012","to":"2012-12-31"},{"from":"2013-01-01","key":"2013","to":"2013-12-31"},{"from":"2014-01-01","key":"post-2013"}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestDateRangeAggregationWithSpecialNames(t *testing.T) {
- agg := NewDateRangeAggregation().Field("created_at").
- AddRange("now-10M/M", "now+10M/M")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"date_range":{"field":"created_at","ranges":[{"from":"now-10M/M","to":"now+10M/M"}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_filter.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_filter.go
deleted file mode 100644
index e4fbc67da..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_filter.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// FilterAggregation defines a single bucket of all the documents
-// in the current document set context that match a specified filter.
-// Often this will be used to narrow down the current aggregation context
-// to a specific set of documents.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-filter-aggregation.html
-type FilterAggregation struct {
- filter Query
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewFilterAggregation() *FilterAggregation {
- return &FilterAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *FilterAggregation) SubAggregation(name string, subAggregation Aggregation) *FilterAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *FilterAggregation) Meta(metaData map[string]interface{}) *FilterAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *FilterAggregation) Filter(filter Query) *FilterAggregation {
- a.filter = filter
- return a
-}
-
-func (a *FilterAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "in_stock_products" : {
- // "filter" : { "range" : { "stock" : { "gt" : 0 } } }
- // }
- // }
- // }
- // This method returns only the { "filter" : {} } part.
-
- src, err := a.filter.Source()
- if err != nil {
- return nil, err
- }
- source := make(map[string]interface{})
- source["filter"] = src
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
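A minimal sketch of FilterAggregation with a sub-aggregation, mirroring the in-stock example in the comment above; "stock" and "price" are illustrative field names:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "github.com/olivere/elastic"
)

func main() {
    // Average price restricted to documents with stock > 0.
    agg := elastic.NewFilterAggregation().
        Filter(elastic.NewRangeQuery("stock").Gt(0)).
        SubAggregation("avg_price", elastic.NewAvgAggregation().Field("price"))

    src, err := agg.Source() // the { "filter": ... } part plus the "aggregations" map
    if err != nil {
        log.Fatal(err)
    }
    out, _ := json.Marshal(src)
    fmt.Println(string(out))
}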
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_filter_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_filter_test.go
deleted file mode 100644
index 6aa4fbb7c..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_filter_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestFilterAggregation(t *testing.T) {
- filter := NewRangeQuery("stock").Gt(0)
- agg := NewFilterAggregation().Filter(filter)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFilterAggregationWithSubAggregation(t *testing.T) {
- avgPriceAgg := NewAvgAggregation().Field("price")
- filter := NewRangeQuery("stock").Gt(0)
- agg := NewFilterAggregation().Filter(filter).
- SubAggregation("avg_price", avgPriceAgg)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFilterAggregationWithMeta(t *testing.T) {
- filter := NewRangeQuery("stock").Gt(0)
- agg := NewFilterAggregation().Filter(filter).Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},"meta":{"name":"Oliver"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_filters.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_filters.go
deleted file mode 100644
index 0d128ca17..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_filters.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "errors"
-
-// FiltersAggregation defines a multi-bucket aggregation where each bucket
-// is associated with a filter. Each bucket will collect all documents that
-// match its associated filter.
-//
-// Notice that the caller has to decide whether to add filters by name
-// (using FilterWithName) or unnamed filters (using Filter or Filters). One cannot
-// use both named and unnamed filters.
-//
-// For details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-filters-aggregation.html
-type FiltersAggregation struct {
- unnamedFilters []Query
- namedFilters map[string]Query
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-// NewFiltersAggregation initializes a new FiltersAggregation.
-func NewFiltersAggregation() *FiltersAggregation {
- return &FiltersAggregation{
- unnamedFilters: make([]Query, 0),
- namedFilters: make(map[string]Query),
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-// Filter adds an unnamed filter. Notice that you can
-// either use named or unnamed filters, but not both.
-func (a *FiltersAggregation) Filter(filter Query) *FiltersAggregation {
- a.unnamedFilters = append(a.unnamedFilters, filter)
- return a
-}
-
-// Filters adds one or more unnamed filters. Notice that you can
-// either use named or unnamed filters, but not both.
-func (a *FiltersAggregation) Filters(filters ...Query) *FiltersAggregation {
- if len(filters) > 0 {
- a.unnamedFilters = append(a.unnamedFilters, filters...)
- }
- return a
-}
-
-// FilterWithName adds a filter with a specific name. Notice that you can
-// either use named or unnamed filters, but not both.
-func (a *FiltersAggregation) FilterWithName(name string, filter Query) *FiltersAggregation {
- a.namedFilters[name] = filter
- return a
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (a *FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) *FiltersAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *FiltersAggregation) Meta(metaData map[string]interface{}) *FiltersAggregation {
- a.meta = metaData
- return a
-}
-
-// Source returns a JSON-serializable interface.
-// If the aggregation is invalid, an error is returned. This may e.g. happen
-// if you mixed named and unnamed filters.
-func (a *FiltersAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "messages" : {
- // "filters" : {
- // "filters" : {
- // "errors" : { "term" : { "body" : "error" }},
- // "warnings" : { "term" : { "body" : "warning" }}
- // }
- // }
- // }
- // }
- // }
- // This method returns only the (outer) { "filters" : {} } part.
-
- source := make(map[string]interface{})
- filters := make(map[string]interface{})
- source["filters"] = filters
-
- if len(a.unnamedFilters) > 0 && len(a.namedFilters) > 0 {
- return nil, errors.New("elastic: use either named or unnamed filters with FiltersAggregation but not both")
- }
-
- if len(a.unnamedFilters) > 0 {
- arr := make([]interface{}, len(a.unnamedFilters))
- for i, filter := range a.unnamedFilters {
- src, err := filter.Source()
- if err != nil {
- return nil, err
- }
- arr[i] = src
- }
- filters["filters"] = arr
- } else {
- dict := make(map[string]interface{})
- for key, filter := range a.namedFilters {
- src, err := filter.Source()
- if err != nil {
- return nil, err
- }
- dict[key] = src
- }
- filters["filters"] = dict
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
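A minimal sketch of FiltersAggregation using named filters only (mixing named and unnamed filters makes Source() return the error defined above); the "body" field and terms are taken from the example in the comment:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "github.com/olivere/elastic"
)

func main() {
    // One bucket per named filter.
    agg := elastic.NewFiltersAggregation().
        FilterWithName("errors", elastic.NewTermQuery("body", "error")).
        FilterWithName("warnings", elastic.NewTermQuery("body", "warning"))

    src, err := agg.Source() // only the outer { "filters": { ... } } part
    if err != nil {
        log.Fatal(err)
    }
    out, _ := json.Marshal(src)
    fmt.Println(string(out))
}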
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_filters_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_filters_test.go
deleted file mode 100644
index 95cc8d7c3..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_filters_test.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestFiltersAggregationFilters(t *testing.T) {
- f1 := NewRangeQuery("stock").Gt(0)
- f2 := NewTermQuery("symbol", "GOOG")
- agg := NewFiltersAggregation().Filters(f1, f2)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFiltersAggregationFilterWithName(t *testing.T) {
- f1 := NewRangeQuery("stock").Gt(0)
- f2 := NewTermQuery("symbol", "GOOG")
- agg := NewFiltersAggregation().
- FilterWithName("f1", f1).
- FilterWithName("f2", f2)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"filters":{"filters":{"f1":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},"f2":{"term":{"symbol":"GOOG"}}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFiltersAggregationWithKeyedAndNonKeyedFilters(t *testing.T) {
- agg := NewFiltersAggregation().
- Filter(NewTermQuery("symbol", "MSFT")). // unnamed
- FilterWithName("one", NewTermQuery("symbol", "GOOG")) // named filter
- _, err := agg.Source()
- if err == nil {
- t.Fatal("expected error, got nil")
- }
-}
-
-func TestFiltersAggregationWithSubAggregation(t *testing.T) {
- avgPriceAgg := NewAvgAggregation().Field("price")
- f1 := NewRangeQuery("stock").Gt(0)
- f2 := NewTermQuery("symbol", "GOOG")
- agg := NewFiltersAggregation().Filters(f1, f2).SubAggregation("avg_price", avgPriceAgg)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFiltersAggregationWithMetaData(t *testing.T) {
- f1 := NewRangeQuery("stock").Gt(0)
- f2 := NewTermQuery("symbol", "GOOG")
- agg := NewFiltersAggregation().Filters(f1, f2).Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]},"meta":{"name":"Oliver"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go
deleted file mode 100644
index c082fb3f2..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields
-// and conceptually works very similarly to the range aggregation.
-// The user can define a point of origin and a set of distance range buckets.
-// The aggregation evaluates the distance of each document value from
-// the origin point and determines the buckets it belongs to based on
-// the ranges (a document belongs to a bucket if the distance between the
-// document and the origin falls within the distance range of the bucket).
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-geodistance-aggregation.html
-type GeoDistanceAggregation struct {
- field string
- unit string
- distanceType string
- point string
- ranges []geoDistAggRange
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-type geoDistAggRange struct {
- Key string
- From interface{}
- To interface{}
-}
-
-func NewGeoDistanceAggregation() *GeoDistanceAggregation {
- return &GeoDistanceAggregation{
- subAggregations: make(map[string]Aggregation),
- ranges: make([]geoDistAggRange, 0),
- }
-}
-
-func (a *GeoDistanceAggregation) Field(field string) *GeoDistanceAggregation {
- a.field = field
- return a
-}
-
-func (a *GeoDistanceAggregation) Unit(unit string) *GeoDistanceAggregation {
- a.unit = unit
- return a
-}
-
-func (a *GeoDistanceAggregation) DistanceType(distanceType string) *GeoDistanceAggregation {
- a.distanceType = distanceType
- return a
-}
-
-func (a *GeoDistanceAggregation) Point(latLon string) *GeoDistanceAggregation {
- a.point = latLon
- return a
-}
-
-func (a *GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoDistanceAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *GeoDistanceAggregation) Meta(metaData map[string]interface{}) *GeoDistanceAggregation {
- a.meta = metaData
- return a
-}
-func (a *GeoDistanceAggregation) AddRange(from, to interface{}) *GeoDistanceAggregation {
- a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
- return a
-}
-
-func (a *GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) *GeoDistanceAggregation {
- a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
- return a
-}
-
-func (a *GeoDistanceAggregation) AddUnboundedTo(from float64) *GeoDistanceAggregation {
- a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil})
- return a
-}
-
-func (a *GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) *GeoDistanceAggregation {
- a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil})
- return a
-}
-
-func (a *GeoDistanceAggregation) AddUnboundedFrom(to float64) *GeoDistanceAggregation {
- a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to})
- return a
-}
-
-func (a *GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) *GeoDistanceAggregation {
- a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to})
- return a
-}
-
-func (a *GeoDistanceAggregation) Between(from, to interface{}) *GeoDistanceAggregation {
- a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
- return a
-}
-
-func (a *GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) *GeoDistanceAggregation {
- a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
- return a
-}
-
-func (a *GeoDistanceAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "rings_around_amsterdam" : {
- // "geo_distance" : {
- // "field" : "location",
- // "origin" : "52.3760, 4.894",
- // "ranges" : [
- // { "to" : 100 },
- // { "from" : 100, "to" : 300 },
- // { "from" : 300 }
- // ]
- // }
- // }
- // }
- // }
- //
- // This method returns only the { "range" : { ... } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["geo_distance"] = opts
-
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.unit != "" {
- opts["unit"] = a.unit
- }
- if a.distanceType != "" {
- opts["distance_type"] = a.distanceType
- }
- if a.point != "" {
- opts["origin"] = a.point
- }
-
- var ranges []interface{}
- for _, ent := range a.ranges {
- r := make(map[string]interface{})
- if ent.Key != "" {
- r["key"] = ent.Key
- }
- if ent.From != nil {
- switch from := ent.From.(type) {
- case int, int16, int32, int64, float32, float64:
- r["from"] = from
- case *int, *int16, *int32, *int64, *float32, *float64:
- r["from"] = from
- case string:
- r["from"] = from
- case *string:
- r["from"] = from
- }
- }
- if ent.To != nil {
- switch to := ent.To.(type) {
- case int, int16, int32, int64, float32, float64:
- r["to"] = to
- case *int, *int16, *int32, *int64, *float32, *float64:
- r["to"] = to
- case string:
- r["to"] = to
- case *string:
- r["to"] = to
- }
- }
- ranges = append(ranges, r)
- }
- opts["ranges"] = ranges
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
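
For orientation, a minimal usage sketch of the GeoDistanceAggregation builder removed above, following the same Source()-and-marshal pattern the package's own tests use; the "location" field, the origin point, and the km unit are illustrative assumptions, not values taken from this diff.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Rings around a point: {to:100}, {from:100,to:300}, {from:300}, measured in km.
	agg := elastic.NewGeoDistanceAggregation().
		Field("location").
		Point("52.3760, 4.894").
		Unit("km").
		AddUnboundedFrom(100). // { "to": 100 }
		AddRange(100, 300).    // { "from": 100, "to": 300 }
		AddUnboundedTo(300)    // { "from": 300 }

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out)) // the {"geo_distance":{...}} body sent to Elasticsearch
}
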
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go
deleted file mode 100644
index 3918b9dd2..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestGeoDistanceAggregation(t *testing.T) {
- agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
- agg = agg.AddRange(nil, 100)
- agg = agg.AddRange(100, 300)
- agg = agg.AddRange(300, nil)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoDistanceAggregationWithPointers(t *testing.T) {
- hundred := 100
- threeHundred := 300
- agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
- agg = agg.AddRange(nil, &hundred)
- agg = agg.AddRange(hundred, &threeHundred)
- agg = agg.AddRange(threeHundred, nil)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoDistanceAggregationWithUnbounded(t *testing.T) {
- agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
- agg = agg.AddUnboundedFrom(100)
- agg = agg.AddRange(100, 300)
- agg = agg.AddUnboundedTo(300)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoDistanceAggregationWithMetaData(t *testing.T) {
- agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
- agg = agg.AddRange(nil, 100)
- agg = agg.AddRange(100, 300)
- agg = agg.AddRange(300, nil)
- agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]},"meta":{"name":"Oliver"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid.go
deleted file mode 100644
index 07f61b331..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package elastic
-
-type GeoHashGridAggregation struct {
- field string
- precision int
- size int
- shardSize int
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewGeoHashGridAggregation() *GeoHashGridAggregation {
- return &GeoHashGridAggregation{
- subAggregations: make(map[string]Aggregation),
- precision: -1,
- size: -1,
- shardSize: -1,
- }
-}
-
-func (a *GeoHashGridAggregation) Field(field string) *GeoHashGridAggregation {
- a.field = field
- return a
-}
-
-func (a *GeoHashGridAggregation) Precision(precision int) *GeoHashGridAggregation {
- a.precision = precision
- return a
-}
-
-func (a *GeoHashGridAggregation) Size(size int) *GeoHashGridAggregation {
- a.size = size
- return a
-}
-
-func (a *GeoHashGridAggregation) ShardSize(shardSize int) *GeoHashGridAggregation {
- a.shardSize = shardSize
- return a
-}
-
-func (a *GeoHashGridAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoHashGridAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-func (a *GeoHashGridAggregation) Meta(metaData map[string]interface{}) *GeoHashGridAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *GeoHashGridAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs": {
- // "new_york": {
- // "geohash_grid": {
- // "field": "location",
- // "precision": 5
- // }
- // }
- // }
- // }
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["geohash_grid"] = opts
-
- if a.field != "" {
- opts["field"] = a.field
- }
-
- if a.precision != -1 {
- opts["precision"] = a.precision
- }
-
- if a.size != -1 {
- opts["size"] = a.size
- }
-
- if a.shardSize != -1 {
- opts["shard_size"] = a.shardSize
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
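
A hedged sketch of how the removed GeoHashGridAggregation is typically attached to a search; the client is assumed to exist already, and the "checkins" index and "location" field are hypothetical names.

package main

import (
	"context"

	"github.com/olivere/elastic"
)

// cellCounts buckets documents into geohash cells of precision 5 and caps
// the number of returned buckets; the index and field names are examples only.
func cellCounts(ctx context.Context, client *elastic.Client) (*elastic.SearchResult, error) {
	agg := elastic.NewGeoHashGridAggregation().
		Field("location").
		Precision(5).
		Size(1000)
	return client.Search().
		Index("checkins").
		Size(0). // only aggregation results, no hits
		Aggregation("cells", agg).
		Do(ctx)
}
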
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid_test.go
deleted file mode 100644
index 044e211eb..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestGeoHashGridAggregation(t *testing.T) {
- agg := NewGeoHashGridAggregation().Field("location").Precision(5)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
-
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("Marshalling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geohash_grid":{"field":"location","precision":5}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoHashGridAggregationWithMetaData(t *testing.T) {
- agg := NewGeoHashGridAggregation().Field("location").Precision(5)
- agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
-
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("Marshalling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geohash_grid":{"field":"location","precision":5},"meta":{"name":"Oliver"}}`
-
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoHashGridAggregationWithSize(t *testing.T) {
- agg := NewGeoHashGridAggregation().Field("location").Precision(5).Size(5)
- agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
-
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("Marshalling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geohash_grid":{"field":"location","precision":5,"size":5},"meta":{"name":"Oliver"}}`
-
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoHashGridAggregationWithShardSize(t *testing.T) {
- agg := NewGeoHashGridAggregation().Field("location").Precision(5).ShardSize(5)
- agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
-
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("Marshalling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geohash_grid":{"field":"location","precision":5,"shard_size":5},"meta":{"name":"Oliver"}}`
-
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_global.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_global.go
deleted file mode 100644
index 4bf2a63f8..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_global.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// GlobalAggregation defines a single bucket of all the documents within
-// the search execution context. This context is defined by the indices
-// and the document types you’re searching on, but is not influenced
-// by the search query itself.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-global-aggregation.html
-type GlobalAggregation struct {
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewGlobalAggregation() *GlobalAggregation {
- return &GlobalAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *GlobalAggregation) SubAggregation(name string, subAggregation Aggregation) *GlobalAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *GlobalAggregation) Meta(metaData map[string]interface{}) *GlobalAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *GlobalAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "all_products" : {
- // "global" : {},
- // "aggs" : {
- // "avg_price" : { "avg" : { "field" : "price" } }
- // }
- // }
- // }
- // }
- // This method returns only the { "global" : {} } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["global"] = opts
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
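
To illustrate the removed GlobalAggregation, a small sketch that mirrors the example in its Source() comment: a global bucket with an avg sub-aggregation. The "price" field comes from that comment; everything else is assumed.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Average price over all documents, ignoring the surrounding query scope.
	agg := elastic.NewGlobalAggregation().
		SubAggregation("avg_price", elastic.NewAvgAggregation().Field("price"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out)) // {"aggregations":{"avg_price":{"avg":{"field":"price"}}},"global":{}}
}
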
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_global_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_global_test.go
deleted file mode 100644
index 5f1e5e6cb..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_global_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestGlobalAggregation(t *testing.T) {
- agg := NewGlobalAggregation()
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"global":{}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGlobalAggregationWithMetaData(t *testing.T) {
- agg := NewGlobalAggregation().Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"global":{},"meta":{"name":"Oliver"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_histogram.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_histogram.go
deleted file mode 100644
index 8b698cff5..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_histogram.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// HistogramAggregation is a multi-bucket values source based aggregation
-// that can be applied on numeric values extracted from the documents.
-// It dynamically builds fixed size (a.k.a. interval) buckets over the
-// values.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-histogram-aggregation.html
-type HistogramAggregation struct {
- field string
- script *Script
- missing interface{}
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-
- interval float64
- order string
- orderAsc bool
- minDocCount *int64
- minBounds *float64
- maxBounds *float64
- offset *float64
-}
-
-func NewHistogramAggregation() *HistogramAggregation {
- return &HistogramAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *HistogramAggregation) Field(field string) *HistogramAggregation {
- a.field = field
- return a
-}
-
-func (a *HistogramAggregation) Script(script *Script) *HistogramAggregation {
- a.script = script
- return a
-}
-
-// Missing configures the value to use when documents miss a value.
-func (a *HistogramAggregation) Missing(missing interface{}) *HistogramAggregation {
- a.missing = missing
- return a
-}
-
-func (a *HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *HistogramAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *HistogramAggregation) Meta(metaData map[string]interface{}) *HistogramAggregation {
- a.meta = metaData
- return a
-}
-
-// Interval sets the bucket interval for this builder; it must be greater than 0.
-func (a *HistogramAggregation) Interval(interval float64) *HistogramAggregation {
- a.interval = interval
- return a
-}
-
-// Order specifies the sort order. Valid values for order are:
-// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
-// with a metric.
-func (a *HistogramAggregation) Order(order string, asc bool) *HistogramAggregation {
- a.order = order
- a.orderAsc = asc
- return a
-}
-
-func (a *HistogramAggregation) OrderByCount(asc bool) *HistogramAggregation {
- // "order" : { "_count" : "asc" }
- a.order = "_count"
- a.orderAsc = asc
- return a
-}
-
-func (a *HistogramAggregation) OrderByCountAsc() *HistogramAggregation {
- return a.OrderByCount(true)
-}
-
-func (a *HistogramAggregation) OrderByCountDesc() *HistogramAggregation {
- return a.OrderByCount(false)
-}
-
-func (a *HistogramAggregation) OrderByKey(asc bool) *HistogramAggregation {
- // "order" : { "_key" : "asc" }
- a.order = "_key"
- a.orderAsc = asc
- return a
-}
-
-func (a *HistogramAggregation) OrderByKeyAsc() *HistogramAggregation {
- return a.OrderByKey(true)
-}
-
-func (a *HistogramAggregation) OrderByKeyDesc() *HistogramAggregation {
- return a.OrderByKey(false)
-}
-
-// OrderByAggregation creates a bucket ordering strategy which sorts buckets
-// based on a single-valued sub-aggregation.
-func (a *HistogramAggregation) OrderByAggregation(aggName string, asc bool) *HistogramAggregation {
- // {
- // "aggs" : {
- // "genders" : {
- // "terms" : {
- // "field" : "gender",
- // "order" : { "avg_height" : "desc" }
- // },
- // "aggs" : {
- // "avg_height" : { "avg" : { "field" : "height" } }
- // }
- // }
- // }
- // }
- a.order = aggName
- a.orderAsc = asc
- return a
-}
-
-// OrderByAggregationAndMetric creates a bucket ordering strategy which
-// sorts buckets based on a metric of a multi-valued sub-aggregation.
-func (a *HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *HistogramAggregation {
- // {
- // "aggs" : {
- // "genders" : {
- // "terms" : {
- // "field" : "gender",
- // "order" : { "height_stats.avg" : "desc" }
- // },
- // "aggs" : {
- // "height_stats" : { "stats" : { "field" : "height" } }
- // }
- // }
- // }
- // }
- a.order = aggName + "." + metric
- a.orderAsc = asc
- return a
-}
-
-func (a *HistogramAggregation) MinDocCount(minDocCount int64) *HistogramAggregation {
- a.minDocCount = &minDocCount
- return a
-}
-
-func (a *HistogramAggregation) ExtendedBounds(min, max float64) *HistogramAggregation {
- a.minBounds = &min
- a.maxBounds = &max
- return a
-}
-
-func (a *HistogramAggregation) ExtendedBoundsMin(min float64) *HistogramAggregation {
- a.minBounds = &min
- return a
-}
-
-func (a *HistogramAggregation) MinBounds(min float64) *HistogramAggregation {
- a.minBounds = &min
- return a
-}
-
-func (a *HistogramAggregation) ExtendedBoundsMax(max float64) *HistogramAggregation {
- a.maxBounds = &max
- return a
-}
-
-func (a *HistogramAggregation) MaxBounds(max float64) *HistogramAggregation {
- a.maxBounds = &max
- return a
-}
-
-// Offset into the histogram
-func (a *HistogramAggregation) Offset(offset float64) *HistogramAggregation {
- a.offset = &offset
- return a
-}
-
-func (a *HistogramAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "prices" : {
- // "histogram" : {
- // "field" : "price",
- // "interval" : 50
- // }
- // }
- // }
- // }
- //
- // This method returns only the { "histogram" : { ... } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["histogram"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.missing != nil {
- opts["missing"] = a.missing
- }
-
- opts["interval"] = a.interval
- if a.order != "" {
- o := make(map[string]interface{})
- if a.orderAsc {
- o[a.order] = "asc"
- } else {
- o[a.order] = "desc"
- }
- opts["order"] = o
- }
- if a.offset != nil {
- opts["offset"] = *a.offset
- }
- if a.minDocCount != nil {
- opts["min_doc_count"] = *a.minDocCount
- }
- if a.minBounds != nil || a.maxBounds != nil {
- bounds := make(map[string]interface{})
- if a.minBounds != nil {
- bounds["min"] = a.minBounds
- }
- if a.maxBounds != nil {
- bounds["max"] = a.maxBounds
- }
- opts["extended_bounds"] = bounds
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
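
A brief, assumption-laden sketch of the removed HistogramAggregation: price buckets of width 50 (the field, interval, and aggregation name come from the Source() example comment above), with empty buckets kept via min_doc_count and extended_bounds and buckets ordered by document count. The "products" index and the existing client are placeholders.

package main

import (
	"context"

	"github.com/olivere/elastic"
)

// priceHistogram builds fixed-width price buckets over a hypothetical
// "products" index; only the aggregation result is requested.
func priceHistogram(ctx context.Context, client *elastic.Client) (*elastic.SearchResult, error) {
	agg := elastic.NewHistogramAggregation().
		Field("price").
		Interval(50).
		MinDocCount(0).         // keep empty buckets
		ExtendedBounds(0, 500). // force the 0..500 range even without data
		OrderByCountDesc()
	return client.Search().
		Index("products").
		Size(0).
		Aggregation("prices", agg).
		Do(ctx)
}
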
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go
deleted file mode 100644
index aeb7eec54..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestHistogramAggregation(t *testing.T) {
- agg := NewHistogramAggregation().Field("price").Interval(50)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"histogram":{"field":"price","interval":50}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestHistogramAggregationWithMetaData(t *testing.T) {
- agg := NewHistogramAggregation().Field("price").Offset(10).Interval(50).Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"histogram":{"field":"price","interval":50,"offset":10},"meta":{"name":"Oliver"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestHistogramAggregationWithMissing(t *testing.T) {
- agg := NewHistogramAggregation().Field("price").Interval(50).Missing("n/a")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"histogram":{"field":"price","interval":50,"missing":"n/a"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range.go
deleted file mode 100644
index 3615e29c3..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// IPRangeAggregation is a range aggregation that is dedicated for
-// IP addresses.
-//
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-iprange-aggregation.html
-type IPRangeAggregation struct {
- field string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- keyed *bool
- entries []IPRangeAggregationEntry
-}
-
-type IPRangeAggregationEntry struct {
- Key string
- Mask string
- From string
- To string
-}
-
-func NewIPRangeAggregation() *IPRangeAggregation {
- return &IPRangeAggregation{
- subAggregations: make(map[string]Aggregation),
- entries: make([]IPRangeAggregationEntry, 0),
- }
-}
-
-func (a *IPRangeAggregation) Field(field string) *IPRangeAggregation {
- a.field = field
- return a
-}
-
-func (a *IPRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *IPRangeAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *IPRangeAggregation) Meta(metaData map[string]interface{}) *IPRangeAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *IPRangeAggregation) Keyed(keyed bool) *IPRangeAggregation {
- a.keyed = &keyed
- return a
-}
-
-func (a *IPRangeAggregation) AddMaskRange(mask string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{Mask: mask})
- return a
-}
-
-func (a *IPRangeAggregation) AddMaskRangeWithKey(key, mask string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, Mask: mask})
- return a
-}
-
-func (a *IPRangeAggregation) AddRange(from, to string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: to})
- return a
-}
-
-func (a *IPRangeAggregation) AddRangeWithKey(key, from, to string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: to})
- return a
-}
-
-func (a *IPRangeAggregation) AddUnboundedTo(from string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: ""})
- return a
-}
-
-func (a *IPRangeAggregation) AddUnboundedToWithKey(key, from string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: ""})
- return a
-}
-
-func (a *IPRangeAggregation) AddUnboundedFrom(to string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{From: "", To: to})
- return a
-}
-
-func (a *IPRangeAggregation) AddUnboundedFromWithKey(key, to string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: "", To: to})
- return a
-}
-
-func (a *IPRangeAggregation) Lt(to string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{From: "", To: to})
- return a
-}
-
-func (a *IPRangeAggregation) LtWithKey(key, to string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: "", To: to})
- return a
-}
-
-func (a *IPRangeAggregation) Between(from, to string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: to})
- return a
-}
-
-func (a *IPRangeAggregation) BetweenWithKey(key, from, to string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: to})
- return a
-}
-
-func (a *IPRangeAggregation) Gt(from string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: ""})
- return a
-}
-
-func (a *IPRangeAggregation) GtWithKey(key, from string) *IPRangeAggregation {
- a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: ""})
- return a
-}
-
-func (a *IPRangeAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "range" : {
- // "ip_range": {
- // "field": "ip",
- // "ranges": [
- // { "to": "10.0.0.5" },
- // { "from": "10.0.0.5" }
- // ]
- // }
- // }
- // }
- // }
- //
- // This method returns only the { "ip_range" : { ... } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["ip_range"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
-
- if a.keyed != nil {
- opts["keyed"] = *a.keyed
- }
-
- var ranges []interface{}
- for _, ent := range a.entries {
- r := make(map[string]interface{})
- if ent.Key != "" {
- r["key"] = ent.Key
- }
- if ent.Mask != "" {
- r["mask"] = ent.Mask
- } else {
- if ent.From != "" {
- r["from"] = ent.From
- }
- if ent.To != "" {
- r["to"] = ent.To
- }
- }
- ranges = append(ranges, r)
- }
- opts["ranges"] = ranges
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
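
For the removed IPRangeAggregation, a sketch that uses the CIDR mask ranges from the package's own tests; the existing client and the "logs" index are assumptions, while "remote_ip" and the two /25 masks are borrowed from the tests below.

package main

import (
	"context"

	"github.com/olivere/elastic"
)

// subnetCounts buckets documents by two /25 subnets; "logs" is a placeholder index.
func subnetCounts(ctx context.Context, client *elastic.Client) (*elastic.SearchResult, error) {
	agg := elastic.NewIPRangeAggregation().
		Field("remote_ip").
		Keyed(true).
		AddMaskRange("10.0.0.0/25").
		AddMaskRange("10.0.0.127/25")
	return client.Search().
		Index("logs").
		Size(0).
		Aggregation("subnets", agg).
		Do(ctx)
}
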
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range_test.go
deleted file mode 100644
index 7a2b49f4c..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestIPRangeAggregation(t *testing.T) {
- agg := NewIPRangeAggregation().Field("remote_ip")
- agg = agg.AddRange("", "10.0.0.0")
- agg = agg.AddRange("10.1.0.0", "10.1.255.255")
- agg = agg.AddRange("10.2.0.0", "")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"ip_range":{"field":"remote_ip","ranges":[{"to":"10.0.0.0"},{"from":"10.1.0.0","to":"10.1.255.255"},{"from":"10.2.0.0"}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestIPRangeAggregationMask(t *testing.T) {
- agg := NewIPRangeAggregation().Field("remote_ip")
- agg = agg.AddMaskRange("10.0.0.0/25")
- agg = agg.AddMaskRange("10.0.0.127/25")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"ip_range":{"field":"remote_ip","ranges":[{"mask":"10.0.0.0/25"},{"mask":"10.0.0.127/25"}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestIPRangeAggregationWithKeyedFlag(t *testing.T) {
- agg := NewIPRangeAggregation().Field("remote_ip")
- agg = agg.Keyed(true)
- agg = agg.AddRange("", "10.0.0.0")
- agg = agg.AddRange("10.1.0.0", "10.1.255.255")
- agg = agg.AddRange("10.2.0.0", "")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"ip_range":{"field":"remote_ip","keyed":true,"ranges":[{"to":"10.0.0.0"},{"from":"10.1.0.0","to":"10.1.255.255"},{"from":"10.2.0.0"}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestIPRangeAggregationWithKeys(t *testing.T) {
- agg := NewIPRangeAggregation().Field("remote_ip")
- agg = agg.Keyed(true)
- agg = agg.LtWithKey("infinity", "10.0.0.5")
- agg = agg.GtWithKey("and-beyond", "10.0.0.5")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"ip_range":{"field":"remote_ip","keyed":true,"ranges":[{"key":"infinity","to":"10.0.0.5"},{"from":"10.0.0.5","key":"and-beyond"}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_missing.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_missing.go
deleted file mode 100644
index 7ba3cb636..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_missing.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// MissingAggregation is a field data based single bucket aggregation,
-// that creates a bucket of all documents in the current document set context
-// that are missing a field value (effectively, missing a field or having
-// the configured NULL value set). This aggregator will often be used in
-// conjunction with other field data bucket aggregators (such as ranges)
-// to return information for all the documents that could not be placed
-// in any of the other buckets due to missing field data values.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-missing-aggregation.html
-type MissingAggregation struct {
- field string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewMissingAggregation() *MissingAggregation {
- return &MissingAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *MissingAggregation) Field(field string) *MissingAggregation {
- a.field = field
- return a
-}
-
-func (a *MissingAggregation) SubAggregation(name string, subAggregation Aggregation) *MissingAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *MissingAggregation) Meta(metaData map[string]interface{}) *MissingAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *MissingAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "products_without_a_price" : {
- // "missing" : { "field" : "price" }
- // }
- // }
- // }
- // This method returns only the { "missing" : { ... } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["missing"] = opts
-
- if a.field != "" {
- opts["field"] = a.field
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
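
A minimal sketch for the removed MissingAggregation, reusing the "price" field from its Source() comment; the marshaling pattern mirrors the package's tests.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Bucket all documents that have no price value at all.
	agg := elastic.NewMissingAggregation().Field("price")

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out)) // {"missing":{"field":"price"}}
}
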
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_missing_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_missing_test.go
deleted file mode 100644
index 179c3084f..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_missing_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMissingAggregation(t *testing.T) {
- agg := NewMissingAggregation().Field("price")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"missing":{"field":"price"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMissingAggregationWithMetaData(t *testing.T) {
- agg := NewMissingAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"missing":{"field":"price"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_nested.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_nested.go
deleted file mode 100644
index 926d493a1..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_nested.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// NestedAggregation is a special single bucket aggregation that enables
-// aggregating nested documents.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-nested-aggregation.html
-type NestedAggregation struct {
- path string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewNestedAggregation() *NestedAggregation {
- return &NestedAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *NestedAggregation) SubAggregation(name string, subAggregation Aggregation) *NestedAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *NestedAggregation) Meta(metaData map[string]interface{}) *NestedAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *NestedAggregation) Path(path string) *NestedAggregation {
- a.path = path
- return a
-}
-
-func (a *NestedAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "query" : {
- // "match" : { "name" : "led tv" }
- // }
- // "aggs" : {
- // "resellers" : {
- // "nested" : {
- // "path" : "resellers"
- // },
- // "aggs" : {
- // "min_price" : { "min" : { "field" : "resellers.price" } }
- // }
- // }
- // }
- // }
- // This method returns only the { "nested" : {} } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["nested"] = opts
-
- opts["path"] = a.path
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
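
The removed NestedAggregation composed with the min sub-aggregation from its own example comment; a hedged sketch, assuming an existing client and a hypothetical "products" index with nested "resellers" objects.

package main

import (
	"context"

	"github.com/olivere/elastic"
)

// minResellerPrice descends into the nested "resellers" objects and computes
// the minimum reseller price; "products" is a placeholder index name.
func minResellerPrice(ctx context.Context, client *elastic.Client) (*elastic.SearchResult, error) {
	agg := elastic.NewNestedAggregation().
		Path("resellers").
		SubAggregation("min_price", elastic.NewMinAggregation().Field("resellers.price"))
	return client.Search().
		Index("products").
		Size(0).
		Aggregation("resellers", agg).
		Do(ctx)
}
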
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_nested_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_nested_test.go
deleted file mode 100644
index 219943e3d..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_nested_test.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestNestedAggregation(t *testing.T) {
- agg := NewNestedAggregation().Path("resellers")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"nested":{"path":"resellers"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestNestedAggregationWithSubAggregation(t *testing.T) {
- minPriceAgg := NewMinAggregation().Field("resellers.price")
- agg := NewNestedAggregation().Path("resellers").SubAggregation("min_price", minPriceAgg)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"aggregations":{"min_price":{"min":{"field":"resellers.price"}}},"nested":{"path":"resellers"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestNestedAggregationWithMetaData(t *testing.T) {
- agg := NewNestedAggregation().Path("resellers").Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"nested":{"path":"resellers"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_range.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_range.go
deleted file mode 100644
index 28c3df78e..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_range.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "time"
-)
-
-// RangeAggregation is a multi-bucket value source based aggregation that
-// enables the user to define a set of ranges - each representing a bucket.
-// During the aggregation process, the values extracted from each document
-// will be checked against each bucket range and "bucket" the
-// relevant/matching document. Note that this aggregation includes the
-// from value and excludes the to value for each range.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-range-aggregation.html
-type RangeAggregation struct {
- field string
- script *Script
- missing interface{}
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- keyed *bool
- unmapped *bool
- entries []rangeAggregationEntry
-}
-
-type rangeAggregationEntry struct {
- Key string
- From interface{}
- To interface{}
-}
-
-func NewRangeAggregation() *RangeAggregation {
- return &RangeAggregation{
- subAggregations: make(map[string]Aggregation),
- entries: make([]rangeAggregationEntry, 0),
- }
-}
-
-func (a *RangeAggregation) Field(field string) *RangeAggregation {
- a.field = field
- return a
-}
-
-func (a *RangeAggregation) Script(script *Script) *RangeAggregation {
- a.script = script
- return a
-}
-
-// Missing configures the value to use when documents miss a value.
-func (a *RangeAggregation) Missing(missing interface{}) *RangeAggregation {
- a.missing = missing
- return a
-}
-
-func (a *RangeAggregation) SubAggregation(name string, subAggregation Aggregation) *RangeAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *RangeAggregation) Meta(metaData map[string]interface{}) *RangeAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *RangeAggregation) Keyed(keyed bool) *RangeAggregation {
- a.keyed = &keyed
- return a
-}
-
-func (a *RangeAggregation) Unmapped(unmapped bool) *RangeAggregation {
- a.unmapped = &unmapped
- return a
-}
-
-func (a *RangeAggregation) AddRange(from, to interface{}) *RangeAggregation {
- a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to})
- return a
-}
-
-func (a *RangeAggregation) AddRangeWithKey(key string, from, to interface{}) *RangeAggregation {
- a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to})
- return a
-}
-
-func (a *RangeAggregation) AddUnboundedTo(from interface{}) *RangeAggregation {
- a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil})
- return a
-}
-
-func (a *RangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *RangeAggregation {
- a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil})
- return a
-}
-
-func (a *RangeAggregation) AddUnboundedFrom(to interface{}) *RangeAggregation {
- a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to})
- return a
-}
-
-func (a *RangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *RangeAggregation {
- a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to})
- return a
-}
-
-func (a *RangeAggregation) Lt(to interface{}) *RangeAggregation {
- a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to})
- return a
-}
-
-func (a *RangeAggregation) LtWithKey(key string, to interface{}) *RangeAggregation {
- a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to})
- return a
-}
-
-func (a *RangeAggregation) Between(from, to interface{}) *RangeAggregation {
- a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to})
- return a
-}
-
-func (a *RangeAggregation) BetweenWithKey(key string, from, to interface{}) *RangeAggregation {
- a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to})
- return a
-}
-
-func (a *RangeAggregation) Gt(from interface{}) *RangeAggregation {
- a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil})
- return a
-}
-
-func (a *RangeAggregation) GtWithKey(key string, from interface{}) *RangeAggregation {
- a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil})
- return a
-}
-
-func (a *RangeAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "price_ranges" : {
- // "range" : {
- // "field" : "price",
- // "ranges" : [
- // { "to" : 50 },
- // { "from" : 50, "to" : 100 },
- // { "from" : 100 }
- // ]
- // }
- // }
- // }
- // }
- //
- // This method returns only the { "range" : { ... } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["range"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.missing != nil {
- opts["missing"] = a.missing
- }
-
- if a.keyed != nil {
- opts["keyed"] = *a.keyed
- }
- if a.unmapped != nil {
- opts["unmapped"] = *a.unmapped
- }
-
- var ranges []interface{}
- for _, ent := range a.entries {
- r := make(map[string]interface{})
- if ent.Key != "" {
- r["key"] = ent.Key
- }
- if ent.From != nil {
- switch from := ent.From.(type) {
- case int, int16, int32, int64, float32, float64:
- r["from"] = from
- case *int, *int16, *int32, *int64, *float32, *float64:
- r["from"] = from
- case time.Time:
- r["from"] = from.Format(time.RFC3339)
- case *time.Time:
- r["from"] = from.Format(time.RFC3339)
- case string:
- r["from"] = from
- case *string:
- r["from"] = from
- }
- }
- if ent.To != nil {
- switch to := ent.To.(type) {
- case int, int16, int32, int64, float32, float64:
- r["to"] = to
- case *int, *int16, *int32, *int64, *float32, *float64:
- r["to"] = to
- case time.Time:
- r["to"] = to.Format(time.RFC3339)
- case *time.Time:
- r["to"] = to.Format(time.RFC3339)
- case string:
- r["to"] = to
- case *string:
- r["to"] = to
- }
- }
- ranges = append(ranges, r)
- }
- opts["ranges"] = ranges
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
- return source, nil
-}
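
A sketch of the removed RangeAggregation using the keyed Lt/Between/Gt helpers, closely mirroring TestRangeAggregationWithKeys below; only the builder and the Source() marshaling are shown, and the bucket boundaries are arbitrary example values.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	agg := elastic.NewRangeAggregation().
		Field("price").
		Keyed(true).
		LtWithKey("cheap", 50).
		BetweenWithKey("affordable", 50, 100).
		GtWithKey("expensive", 100)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out)) // keyed {"range":{...}} body with named buckets
}
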
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_range_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_range_test.go
deleted file mode 100644
index 17fbcecf3..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_range_test.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestRangeAggregation(t *testing.T) {
- agg := NewRangeAggregation().Field("price")
- agg = agg.AddRange(nil, 50)
- agg = agg.AddRange(50, 100)
- agg = agg.AddRange(100, nil)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestRangeAggregationWithPointers(t *testing.T) {
- fifty := 50
- hundred := 100
- agg := NewRangeAggregation().Field("price")
- agg = agg.AddRange(nil, &fifty)
- agg = agg.AddRange(fifty, &hundred)
- agg = agg.AddRange(hundred, nil)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestRangeAggregationWithUnbounded(t *testing.T) {
- agg := NewRangeAggregation().Field("field_name").
- AddUnboundedFrom(50).
- AddRange(20, 70).
- AddRange(70, 120).
- AddUnboundedTo(150)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestRangeAggregationWithLtAndCo(t *testing.T) {
- agg := NewRangeAggregation().Field("field_name").
- Lt(50).
- Between(20, 70).
- Between(70, 120).
- Gt(150)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestRangeAggregationWithKeyedFlag(t *testing.T) {
- agg := NewRangeAggregation().Field("field_name").
- Keyed(true).
- Lt(50).
- Between(20, 70).
- Between(70, 120).
- Gt(150)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestRangeAggregationWithKeys(t *testing.T) {
- agg := NewRangeAggregation().Field("field_name").
- Keyed(true).
- LtWithKey("cheap", 50).
- BetweenWithKey("affordable", 20, 70).
- BetweenWithKey("average", 70, 120).
- GtWithKey("expensive", 150)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"key":"cheap","to":50},{"from":20,"key":"affordable","to":70},{"from":70,"key":"average","to":120},{"from":150,"key":"expensive"}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestRangeAggregationWithMetaData(t *testing.T) {
- agg := NewRangeAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"})
- agg = agg.AddRange(nil, 50)
- agg = agg.AddRange(50, 100)
- agg = agg.AddRange(100, nil)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestRangeAggregationWithMissing(t *testing.T) {
- agg := NewRangeAggregation().Field("price").Missing(0)
- agg = agg.AddRange(nil, 50)
- agg = agg.AddRange(50, 100)
- agg = agg.AddRange(100, nil)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"range":{"field":"price","missing":0,"ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested.go
deleted file mode 100644
index 9e4680195..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// ReverseNestedAggregation defines a special single bucket aggregation
-// that enables aggregating on parent docs from nested documents.
-// Effectively this aggregation can break out of the nested block
-// structure and link to other nested structures or the root document,
-// which allows nesting other aggregations that aren’t part of
-// the nested object in a nested aggregation.
-//
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-reverse-nested-aggregation.html
-type ReverseNestedAggregation struct {
- path string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-// NewReverseNestedAggregation initializes a new ReverseNestedAggregation
-// bucket aggregation.
-func NewReverseNestedAggregation() *ReverseNestedAggregation {
- return &ReverseNestedAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-// Path set the path to use for this nested aggregation. The path must match
-// the path to a nested object in the mappings. If it is not specified
-// then this aggregation will go back to the root document.
-func (a *ReverseNestedAggregation) Path(path string) *ReverseNestedAggregation {
- a.path = path
- return a
-}
-
-func (a *ReverseNestedAggregation) SubAggregation(name string, subAggregation Aggregation) *ReverseNestedAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *ReverseNestedAggregation) Meta(metaData map[string]interface{}) *ReverseNestedAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *ReverseNestedAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "reverse_nested" : {
- // "path": "..."
- // }
- // }
- // }
- // This method returns only the { "reverse_nested" : {} } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["reverse_nested"] = opts
-
- if a.path != "" {
- opts["path"] = a.path
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
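
To show how the removed ReverseNestedAggregation is usually combined with a NestedAggregation, a small composition sketch: descend into nested "comments" objects, then jump back to the parent documents for an avg over "price". The path and field names are illustrative, not taken from this diff.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// nested(comments) -> reverse_nested() -> avg(price) on the parent documents.
	backToParent := elastic.NewReverseNestedAggregation().
		SubAggregation("avg_price", elastic.NewAvgAggregation().Field("price"))
	agg := elastic.NewNestedAggregation().
		Path("comments").
		SubAggregation("to_parent", backToParent)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
}
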
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested_test.go
deleted file mode 100644
index dc50bbc28..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestReverseNestedAggregation(t *testing.T) {
- agg := NewReverseNestedAggregation()
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"reverse_nested":{}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestReverseNestedAggregationWithPath(t *testing.T) {
- agg := NewReverseNestedAggregation().Path("comments")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"reverse_nested":{"path":"comments"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestReverseNestedAggregationWithSubAggregation(t *testing.T) {
- avgPriceAgg := NewAvgAggregation().Field("price")
- agg := NewReverseNestedAggregation().
- Path("a_path").
- SubAggregation("avg_price", avgPriceAgg)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"reverse_nested":{"path":"a_path"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestReverseNestedAggregationWithMeta(t *testing.T) {
- agg := NewReverseNestedAggregation().
- Path("a_path").
- Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"reverse_nested":{"path":"a_path"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_sampler.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_sampler.go
deleted file mode 100644
index 0fd729dfd..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_sampler.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// SamplerAggregation is a filtering aggregation used to limit any
-// sub aggregations' processing to a sample of the top-scoring documents.
-// Optionally, diversity settings can be used to limit the number of matches
-// that share a common value such as an "author".
-//
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-sampler-aggregation.html
-type SamplerAggregation struct {
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-
- shardSize int
- maxDocsPerValue int
- executionHint string
-}
-
-func NewSamplerAggregation() *SamplerAggregation {
- return &SamplerAggregation{
- shardSize: -1,
- maxDocsPerValue: -1,
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *SamplerAggregation) SubAggregation(name string, subAggregation Aggregation) *SamplerAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *SamplerAggregation) Meta(metaData map[string]interface{}) *SamplerAggregation {
- a.meta = metaData
- return a
-}
-
-// ShardSize sets the maximum number of docs returned from each shard.
-func (a *SamplerAggregation) ShardSize(shardSize int) *SamplerAggregation {
- a.shardSize = shardSize
- return a
-}
-
-func (a *SamplerAggregation) MaxDocsPerValue(maxDocsPerValue int) *SamplerAggregation {
- a.maxDocsPerValue = maxDocsPerValue
- return a
-}
-
-func (a *SamplerAggregation) ExecutionHint(hint string) *SamplerAggregation {
- a.executionHint = hint
- return a
-}
-
-func (a *SamplerAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "sample" : {
- // "sampler" : {
- // "shard_size" : 200
- // },
- // "aggs": {
- // "keywords": {
- // "significant_terms": {
- // "field": "text"
- // }
- // }
- // }
- // }
- // }
- // }
- //
- // This method returns only the { "sampler" : { ... } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["sampler"] = opts
-
- if a.shardSize >= 0 {
- opts["shard_size"] = a.shardSize
- }
- if a.maxDocsPerValue >= 0 {
- opts["max_docs_per_value"] = a.maxDocsPerValue
- }
- if a.executionHint != "" {
- opts["execution_hint"] = a.executionHint
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
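
As a quick illustration of the builder above (a sketch under the same assumptions: vendored import path github.com/olivere/elastic, illustrative field name "text"), the sampler is typically paired with an expensive sub-aggregation such as significant_terms, mirroring the JSON example in the Source() comment.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Sample the top 200 documents per shard, then find significant
	// keywords within that sample.
	keywords := elastic.NewSignificantTermsAggregation().Field("text")
	agg := elastic.NewSamplerAggregation().
		ShardSize(200).
		SubAggregation("keywords", keywords)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"shard_size":200}}
}
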
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go
deleted file mode 100644
index c4dc1c7cc..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSamplerAggregation(t *testing.T) {
- keywordsAgg := NewSignificantTermsAggregation().Field("text")
- agg := NewSamplerAggregation().
- ShardSize(200).
- SubAggregation("keywords", keywordsAgg)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"shard_size":200}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go
deleted file mode 100644
index 571a91217..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go
+++ /dev/null
@@ -1,389 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// SignificantTermsAggregation is an aggregation that returns interesting
-// or unusual occurrences of terms in a set.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html
-type SignificantTermsAggregation struct {
- field string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-
- minDocCount *int
- shardMinDocCount *int
- requiredSize *int
- shardSize *int
- filter Query
- executionHint string
- significanceHeuristic SignificanceHeuristic
-}
-
-func NewSignificantTermsAggregation() *SignificantTermsAggregation {
- return &SignificantTermsAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *SignificantTermsAggregation) Field(field string) *SignificantTermsAggregation {
- a.field = field
- return a
-}
-
-func (a *SignificantTermsAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTermsAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *SignificantTermsAggregation) Meta(metaData map[string]interface{}) *SignificantTermsAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *SignificantTermsAggregation) MinDocCount(minDocCount int) *SignificantTermsAggregation {
- a.minDocCount = &minDocCount
- return a
-}
-
-func (a *SignificantTermsAggregation) ShardMinDocCount(shardMinDocCount int) *SignificantTermsAggregation {
- a.shardMinDocCount = &shardMinDocCount
- return a
-}
-
-func (a *SignificantTermsAggregation) RequiredSize(requiredSize int) *SignificantTermsAggregation {
- a.requiredSize = &requiredSize
- return a
-}
-
-func (a *SignificantTermsAggregation) ShardSize(shardSize int) *SignificantTermsAggregation {
- a.shardSize = &shardSize
- return a
-}
-
-func (a *SignificantTermsAggregation) BackgroundFilter(filter Query) *SignificantTermsAggregation {
- a.filter = filter
- return a
-}
-
-func (a *SignificantTermsAggregation) ExecutionHint(hint string) *SignificantTermsAggregation {
- a.executionHint = hint
- return a
-}
-
-func (a *SignificantTermsAggregation) SignificanceHeuristic(heuristic SignificanceHeuristic) *SignificantTermsAggregation {
- a.significanceHeuristic = heuristic
- return a
-}
-
-func (a *SignificantTermsAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "query" : {
- // "terms" : {"force" : [ "British Transport Police" ]}
- // },
- // "aggregations" : {
- // "significantCrimeTypes" : {
- // "significant_terms" : { "field" : "crime_type" }
- // }
- // }
- // }
- //
- // This method returns only the
- // { "significant_terms" : { "field" : "crime_type" }
- // part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["significant_terms"] = opts
-
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.requiredSize != nil {
- opts["size"] = *a.requiredSize // not a typo!
- }
- if a.shardSize != nil {
- opts["shard_size"] = *a.shardSize
- }
- if a.minDocCount != nil {
- opts["min_doc_count"] = *a.minDocCount
- }
- if a.shardMinDocCount != nil {
- opts["shard_min_doc_count"] = *a.shardMinDocCount
- }
- if a.executionHint != "" {
- opts["execution_hint"] = a.executionHint
- }
- if a.filter != nil {
- src, err := a.filter.Source()
- if err != nil {
- return nil, err
- }
- opts["background_filter"] = src
- }
- if a.significanceHeuristic != nil {
- name := a.significanceHeuristic.Name()
- src, err := a.significanceHeuristic.Source()
- if err != nil {
- return nil, err
- }
- opts[name] = src
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
-
-// -- Significance heuristics --
-
-type SignificanceHeuristic interface {
- Name() string
- Source() (interface{}, error)
-}
-
-// -- Chi Square --
-
-// ChiSquareSignificanceHeuristic implements Chi square as described
-// in "Information Retrieval", Manning et al., Chapter 13.5.2.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_chi_square
-// for details.
-type ChiSquareSignificanceHeuristic struct {
- backgroundIsSuperset *bool
- includeNegatives *bool
-}
-
-// NewChiSquareSignificanceHeuristic initializes a new ChiSquareSignificanceHeuristic.
-func NewChiSquareSignificanceHeuristic() *ChiSquareSignificanceHeuristic {
- return &ChiSquareSignificanceHeuristic{}
-}
-
-// Name returns the name of the heuristic in the REST interface.
-func (sh *ChiSquareSignificanceHeuristic) Name() string {
- return "chi_square"
-}
-
-// BackgroundIsSuperset indicates whether the custom background filter you
-// defined represents a different set of documents that you want to
-// compare to.
-func (sh *ChiSquareSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *ChiSquareSignificanceHeuristic {
- sh.backgroundIsSuperset = &backgroundIsSuperset
- return sh
-}
-
-// IncludeNegatives indicates whether to filter out the terms that appear
-// much less in the subset than in the background without the subset.
-func (sh *ChiSquareSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *ChiSquareSignificanceHeuristic {
- sh.includeNegatives = &includeNegatives
- return sh
-}
-
-// Source returns the parameters that need to be added to the REST parameters.
-func (sh *ChiSquareSignificanceHeuristic) Source() (interface{}, error) {
- source := make(map[string]interface{})
- if sh.backgroundIsSuperset != nil {
- source["background_is_superset"] = *sh.backgroundIsSuperset
- }
- if sh.includeNegatives != nil {
- source["include_negatives"] = *sh.includeNegatives
- }
- return source, nil
-}
-
-// -- GND --
-
-// GNDSignificanceHeuristic implements the "Google Normalized Distance"
-// as described in "The Google Similarity Distance", Cilibrasi and Vitanyi,
-// 2007.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_google_normalized_distance
-// for details.
-type GNDSignificanceHeuristic struct {
- backgroundIsSuperset *bool
-}
-
-// NewGNDSignificanceHeuristic initializes a new GNDSignificanceHeuristic.
-func NewGNDSignificanceHeuristic() *GNDSignificanceHeuristic {
- return &GNDSignificanceHeuristic{}
-}
-
-// Name returns the name of the heuristic in the REST interface.
-func (sh *GNDSignificanceHeuristic) Name() string {
- return "gnd"
-}
-
-// BackgroundIsSuperset indicates whether the custom background filter you
-// defined represents a different set of documents that you want to
-// compare to.
-func (sh *GNDSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *GNDSignificanceHeuristic {
- sh.backgroundIsSuperset = &backgroundIsSuperset
- return sh
-}
-
-// Source returns the parameters that need to be added to the REST parameters.
-func (sh *GNDSignificanceHeuristic) Source() (interface{}, error) {
- source := make(map[string]interface{})
- if sh.backgroundIsSuperset != nil {
- source["background_is_superset"] = *sh.backgroundIsSuperset
- }
- return source, nil
-}
-
-// -- JLH Score --
-
-// JLHScoreSignificanceHeuristic implements the JLH score as described in
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_jlh_score.
-type JLHScoreSignificanceHeuristic struct{}
-
-// NewJLHScoreSignificanceHeuristic initializes a new JLHScoreSignificanceHeuristic.
-func NewJLHScoreSignificanceHeuristic() *JLHScoreSignificanceHeuristic {
- return &JLHScoreSignificanceHeuristic{}
-}
-
-// Name returns the name of the heuristic in the REST interface.
-func (sh *JLHScoreSignificanceHeuristic) Name() string {
- return "jlh"
-}
-
-// Source returns the parameters that need to be added to the REST parameters.
-func (sh *JLHScoreSignificanceHeuristic) Source() (interface{}, error) {
- source := make(map[string]interface{})
- return source, nil
-}
-
-// -- Mutual Information --
-
-// MutualInformationSignificanceHeuristic implements Mutual information
-// as described in "Information Retrieval", Manning et al., Chapter 13.5.1.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_mutual_information
-// for details.
-type MutualInformationSignificanceHeuristic struct {
- backgroundIsSuperset *bool
- includeNegatives *bool
-}
-
-// NewMutualInformationSignificanceHeuristic initializes a new instance of
-// MutualInformationSignificanceHeuristic.
-func NewMutualInformationSignificanceHeuristic() *MutualInformationSignificanceHeuristic {
- return &MutualInformationSignificanceHeuristic{}
-}
-
-// Name returns the name of the heuristic in the REST interface.
-func (sh *MutualInformationSignificanceHeuristic) Name() string {
- return "mutual_information"
-}
-
-// BackgroundIsSuperset indicates whether the custom background filter you
-// defined represents a different set of documents that you want to
-// compare to.
-func (sh *MutualInformationSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *MutualInformationSignificanceHeuristic {
- sh.backgroundIsSuperset = &backgroundIsSuperset
- return sh
-}
-
-// IncludeNegatives indicates whether to filter out the terms that appear
-// much less in the subset than in the background without the subset.
-func (sh *MutualInformationSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *MutualInformationSignificanceHeuristic {
- sh.includeNegatives = &includeNegatives
- return sh
-}
-
-// Source returns the parameters that need to be added to the REST parameters.
-func (sh *MutualInformationSignificanceHeuristic) Source() (interface{}, error) {
- source := make(map[string]interface{})
- if sh.backgroundIsSuperset != nil {
- source["background_is_superset"] = *sh.backgroundIsSuperset
- }
- if sh.includeNegatives != nil {
- source["include_negatives"] = *sh.includeNegatives
- }
- return source, nil
-}
-
-// -- Percentage Score --
-
-// PercentageScoreSignificanceHeuristic implements the algorithm described
-// in https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_percentage.
-type PercentageScoreSignificanceHeuristic struct{}
-
-// NewPercentageScoreSignificanceHeuristic initializes a new instance of
-// PercentageScoreSignificanceHeuristic.
-func NewPercentageScoreSignificanceHeuristic() *PercentageScoreSignificanceHeuristic {
- return &PercentageScoreSignificanceHeuristic{}
-}
-
-// Name returns the name of the heuristic in the REST interface.
-func (sh *PercentageScoreSignificanceHeuristic) Name() string {
- return "percentage"
-}
-
-// Source returns the parameters that need to be added to the REST parameters.
-func (sh *PercentageScoreSignificanceHeuristic) Source() (interface{}, error) {
- source := make(map[string]interface{})
- return source, nil
-}
-
-// -- Script --
-
-// ScriptSignificanceHeuristic implements a scripted significance heuristic.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_scripted
-// for details.
-type ScriptSignificanceHeuristic struct {
- script *Script
-}
-
-// NewScriptSignificanceHeuristic initializes a new instance of
-// ScriptSignificanceHeuristic.
-func NewScriptSignificanceHeuristic() *ScriptSignificanceHeuristic {
- return &ScriptSignificanceHeuristic{}
-}
-
-// Name returns the name of the heuristic in the REST interface.
-func (sh *ScriptSignificanceHeuristic) Name() string {
- return "script_heuristic"
-}
-
-// Script specifies the script to use to get custom scores. The following
-// parameters are available in the script: `_subset_freq`, `_superset_freq`,
-// `_subset_size`, and `_superset_size`.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_scripted
-// for details.
-func (sh *ScriptSignificanceHeuristic) Script(script *Script) *ScriptSignificanceHeuristic {
- sh.script = script
- return sh
-}
-
-// Source returns the parameters that need to be added to the REST parameters.
-func (sh *ScriptSignificanceHeuristic) Source() (interface{}, error) {
- source := make(map[string]interface{})
- if sh.script != nil {
- src, err := sh.script.Source()
- if err != nil {
- return nil, err
- }
- source["script"] = src
- }
- return source, nil
-}
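
A short sketch combining the significant_terms builder with the chi_square heuristic defined above; the import path is the vendored github.com/olivere/elastic, and the field and background-filter values ("crime_type", "city":"London") are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Significant crime types, scored with the chi_square heuristic and
	// compared against a custom background filter.
	agg := elastic.NewSignificantTermsAggregation().
		Field("crime_type").
		BackgroundFilter(elastic.NewTermQuery("city", "London")).
		SignificanceHeuristic(
			elastic.NewChiSquareSignificanceHeuristic().
				BackgroundIsSuperset(true).
				IncludeNegatives(false),
		)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"significant_terms":{"background_filter":{"term":{"city":"London"}},
	//  "chi_square":{"background_is_superset":true,"include_negatives":false},
	//  "field":"crime_type"}}
}
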
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go
deleted file mode 100644
index a5b269671..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSignificantTermsAggregation(t *testing.T) {
- agg := NewSignificantTermsAggregation().Field("crime_type")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"significant_terms":{"field":"crime_type"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSignificantTermsAggregationWithArgs(t *testing.T) {
- agg := NewSignificantTermsAggregation().
- Field("crime_type").
- ExecutionHint("map").
- ShardSize(5).
- MinDocCount(10).
- BackgroundFilter(NewTermQuery("city", "London"))
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"significant_terms":{"background_filter":{"term":{"city":"London"}},"execution_hint":"map","field":"crime_type","min_doc_count":10,"shard_size":5}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSignificantTermsAggregationSubAggregation(t *testing.T) {
- crimeTypesAgg := NewSignificantTermsAggregation().Field("crime_type")
- agg := NewTermsAggregation().Field("force")
- agg = agg.SubAggregation("significantCrimeTypes", crimeTypesAgg)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"aggregations":{"significantCrimeTypes":{"significant_terms":{"field":"crime_type"}}},"terms":{"field":"force"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSignificantTermsAggregationWithMetaData(t *testing.T) {
- agg := NewSignificantTermsAggregation().Field("crime_type")
- agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"significant_terms":{"field":"crime_type"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSignificantTermsAggregationWithChiSquare(t *testing.T) {
- agg := NewSignificantTermsAggregation().Field("crime_type")
- agg = agg.SignificanceHeuristic(
- NewChiSquareSignificanceHeuristic().
- BackgroundIsSuperset(true).
- IncludeNegatives(false),
- )
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"significant_terms":{"chi_square":{"background_is_superset":true,"include_negatives":false},"field":"crime_type"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSignificantTermsAggregationWithGND(t *testing.T) {
- agg := NewSignificantTermsAggregation().Field("crime_type")
- agg = agg.SignificanceHeuristic(
- NewGNDSignificanceHeuristic(),
- )
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"significant_terms":{"field":"crime_type","gnd":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSignificantTermsAggregationWithJLH(t *testing.T) {
- agg := NewSignificantTermsAggregation().Field("crime_type")
- agg = agg.SignificanceHeuristic(
- NewJLHScoreSignificanceHeuristic(),
- )
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"significant_terms":{"field":"crime_type","jlh":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSignificantTermsAggregationWithMutualInformation(t *testing.T) {
- agg := NewSignificantTermsAggregation().Field("crime_type")
- agg = agg.SignificanceHeuristic(
- NewMutualInformationSignificanceHeuristic().
- BackgroundIsSuperset(false).
- IncludeNegatives(true),
- )
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"significant_terms":{"field":"crime_type","mutual_information":{"background_is_superset":false,"include_negatives":true}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSignificantTermsAggregationWithPercentageScore(t *testing.T) {
- agg := NewSignificantTermsAggregation().Field("crime_type")
- agg = agg.SignificanceHeuristic(
- NewPercentageScoreSignificanceHeuristic(),
- )
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"significant_terms":{"field":"crime_type","percentage":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSignificantTermsAggregationWithScript(t *testing.T) {
- agg := NewSignificantTermsAggregation().Field("crime_type")
- agg = agg.SignificanceHeuristic(
- NewScriptSignificanceHeuristic().
- Script(NewScript("_subset_freq/(_superset_freq - _subset_freq + 1)")),
- )
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"significant_terms":{"field":"crime_type","script_heuristic":{"script":{"source":"_subset_freq/(_superset_freq - _subset_freq + 1)"}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text.go
deleted file mode 100644
index de761613c..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// SignificantTextAggregation returns interesting or unusual occurrences
-// of free-text terms in a set.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significanttext-aggregation.html
-type SignificantTextAggregation struct {
- field string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-
- sourceFieldNames []string
- filterDuplicateText *bool
- includeExclude *TermsAggregationIncludeExclude
- filter Query
- bucketCountThresholds *BucketCountThresholds
- significanceHeuristic SignificanceHeuristic
-}
-
-func NewSignificantTextAggregation() *SignificantTextAggregation {
- return &SignificantTextAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *SignificantTextAggregation) Field(field string) *SignificantTextAggregation {
- a.field = field
- return a
-}
-
-func (a *SignificantTextAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTextAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *SignificantTextAggregation) Meta(metaData map[string]interface{}) *SignificantTextAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *SignificantTextAggregation) SourceFieldNames(names ...string) *SignificantTextAggregation {
- a.sourceFieldNames = names
- return a
-}
-
-func (a *SignificantTextAggregation) FilterDuplicateText(filter bool) *SignificantTextAggregation {
- a.filterDuplicateText = &filter
- return a
-}
-
-func (a *SignificantTextAggregation) MinDocCount(minDocCount int64) *SignificantTextAggregation {
- if a.bucketCountThresholds == nil {
- a.bucketCountThresholds = &BucketCountThresholds{}
- }
- a.bucketCountThresholds.MinDocCount = &minDocCount
- return a
-}
-
-func (a *SignificantTextAggregation) ShardMinDocCount(shardMinDocCount int64) *SignificantTextAggregation {
- if a.bucketCountThresholds == nil {
- a.bucketCountThresholds = &BucketCountThresholds{}
- }
- a.bucketCountThresholds.ShardMinDocCount = &shardMinDocCount
- return a
-}
-
-func (a *SignificantTextAggregation) Size(size int) *SignificantTextAggregation {
- if a.bucketCountThresholds == nil {
- a.bucketCountThresholds = &BucketCountThresholds{}
- }
- a.bucketCountThresholds.RequiredSize = &size
- return a
-}
-
-func (a *SignificantTextAggregation) ShardSize(shardSize int) *SignificantTextAggregation {
- if a.bucketCountThresholds == nil {
- a.bucketCountThresholds = &BucketCountThresholds{}
- }
- a.bucketCountThresholds.ShardSize = &shardSize
- return a
-}
-
-func (a *SignificantTextAggregation) BackgroundFilter(filter Query) *SignificantTextAggregation {
- a.filter = filter
- return a
-}
-
-func (a *SignificantTextAggregation) SignificanceHeuristic(heuristic SignificanceHeuristic) *SignificantTextAggregation {
- a.significanceHeuristic = heuristic
- return a
-}
-
-func (a *SignificantTextAggregation) Include(regexp string) *SignificantTextAggregation {
- if a.includeExclude == nil {
- a.includeExclude = &TermsAggregationIncludeExclude{}
- }
- a.includeExclude.Include = regexp
- return a
-}
-
-func (a *SignificantTextAggregation) IncludeValues(values ...interface{}) *SignificantTextAggregation {
- if a.includeExclude == nil {
- a.includeExclude = &TermsAggregationIncludeExclude{}
- }
- a.includeExclude.IncludeValues = append(a.includeExclude.IncludeValues, values...)
- return a
-}
-
-func (a *SignificantTextAggregation) Exclude(regexp string) *SignificantTextAggregation {
- if a.includeExclude == nil {
- a.includeExclude = &TermsAggregationIncludeExclude{}
- }
- a.includeExclude.Exclude = regexp
- return a
-}
-
-func (a *SignificantTextAggregation) ExcludeValues(values ...interface{}) *SignificantTextAggregation {
- if a.includeExclude == nil {
- a.includeExclude = &TermsAggregationIncludeExclude{}
- }
- a.includeExclude.ExcludeValues = append(a.includeExclude.ExcludeValues, values...)
- return a
-}
-
-func (a *SignificantTextAggregation) Partition(p int) *SignificantTextAggregation {
- if a.includeExclude == nil {
- a.includeExclude = &TermsAggregationIncludeExclude{}
- }
- a.includeExclude.Partition = p
- return a
-}
-
-func (a *SignificantTextAggregation) NumPartitions(n int) *SignificantTextAggregation {
- if a.includeExclude == nil {
- a.includeExclude = &TermsAggregationIncludeExclude{}
- }
- a.includeExclude.NumPartitions = n
- return a
-}
-
-func (a *SignificantTextAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "query" : {
- // "match" : {"content" : "Bird flu"}
- // },
- // "aggregations" : {
- // "my_sample" : {
- // "sampler": {
- // "shard_size" : 100
- // },
- // "aggregations": {
- // "keywords" : {
- // "significant_text" : { "field" : "content" }
- // }
- // }
- // }
- // }
- // }
- //
- // This method returns only the
- // { "significant_text" : { "field" : "content" }
- // part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["significant_text"] = opts
-
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.bucketCountThresholds != nil {
- if a.bucketCountThresholds.RequiredSize != nil {
- opts["size"] = a.bucketCountThresholds.RequiredSize
- }
- if a.bucketCountThresholds.ShardSize != nil {
- opts["shard_size"] = a.bucketCountThresholds.ShardSize
- }
- if a.bucketCountThresholds.MinDocCount != nil {
- opts["min_doc_count"] = a.bucketCountThresholds.MinDocCount
- }
- if a.bucketCountThresholds.ShardMinDocCount != nil {
- opts["shard_min_doc_count"] = a.bucketCountThresholds.ShardMinDocCount
- }
- }
- if a.filter != nil {
- src, err := a.filter.Source()
- if err != nil {
- return nil, err
- }
- opts["background_filter"] = src
- }
- if a.significanceHeuristic != nil {
- name := a.significanceHeuristic.Name()
- src, err := a.significanceHeuristic.Source()
- if err != nil {
- return nil, err
- }
- opts[name] = src
- }
- // Include/Exclude
- if ie := a.includeExclude; ie != nil {
- // Include
- if ie.Include != "" {
- opts["include"] = ie.Include
- } else if len(ie.IncludeValues) > 0 {
- opts["include"] = ie.IncludeValues
- } else if ie.NumPartitions > 0 {
- inc := make(map[string]interface{})
- inc["partition"] = ie.Partition
- inc["num_partitions"] = ie.NumPartitions
- opts["include"] = inc
- }
- // Exclude
- if ie.Exclude != "" {
- opts["exclude"] = ie.Exclude
- } else if len(ie.ExcludeValues) > 0 {
- opts["exclude"] = ie.ExcludeValues
- }
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
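
A minimal sketch of the significant_text builder above, under the same assumptions (vendored import path github.com/olivere/elastic; the field, thresholds, and background filter are illustrative values).

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Significant free-text terms in "content", restricted by bucket
	// thresholds and compared against a custom background filter.
	agg := elastic.NewSignificantTextAggregation().
		Field("content").
		ShardSize(5).
		MinDocCount(10).
		BackgroundFilter(elastic.NewTermQuery("city", "London"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"significant_text":{"background_filter":{"term":{"city":"London"}},"field":"content","min_doc_count":10,"shard_size":5}}
}
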
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text_test.go
deleted file mode 100644
index 53ac4461d..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSignificantTextAggregation(t *testing.T) {
- agg := NewSignificantTextAggregation().Field("content")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"significant_text":{"field":"content"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSignificantTextAggregationWithArgs(t *testing.T) {
- agg := NewSignificantTextAggregation().
- Field("content").
- ShardSize(5).
- MinDocCount(10).
- BackgroundFilter(NewTermQuery("city", "London"))
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"significant_text":{"background_filter":{"term":{"city":"London"}},"field":"content","min_doc_count":10,"shard_size":5}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSignificantTextAggregationWithMetaData(t *testing.T) {
- agg := NewSignificantTextAggregation().Field("content")
- agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"significant_text":{"field":"content"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_terms.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_terms.go
deleted file mode 100644
index 6bcc322d0..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_terms.go
+++ /dev/null
@@ -1,368 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// TermsAggregation is a multi-bucket value source based aggregation
-// where buckets are dynamically built - one per unique value.
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html
-type TermsAggregation struct {
- field string
- script *Script
- missing interface{}
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-
- size *int
- shardSize *int
- requiredSize *int
- minDocCount *int
- shardMinDocCount *int
- valueType string
- includeExclude *TermsAggregationIncludeExclude
- executionHint string
- collectionMode string
- showTermDocCountError *bool
- order []TermsOrder
-}
-
-func NewTermsAggregation() *TermsAggregation {
- return &TermsAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *TermsAggregation) Field(field string) *TermsAggregation {
- a.field = field
- return a
-}
-
-func (a *TermsAggregation) Script(script *Script) *TermsAggregation {
- a.script = script
- return a
-}
-
-// Missing configures the value to use when documents miss a value.
-func (a *TermsAggregation) Missing(missing interface{}) *TermsAggregation {
- a.missing = missing
- return a
-}
-
-func (a *TermsAggregation) SubAggregation(name string, subAggregation Aggregation) *TermsAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *TermsAggregation) Meta(metaData map[string]interface{}) *TermsAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *TermsAggregation) Size(size int) *TermsAggregation {
- a.size = &size
- return a
-}
-
-func (a *TermsAggregation) RequiredSize(requiredSize int) *TermsAggregation {
- a.requiredSize = &requiredSize
- return a
-}
-
-func (a *TermsAggregation) ShardSize(shardSize int) *TermsAggregation {
- a.shardSize = &shardSize
- return a
-}
-
-func (a *TermsAggregation) MinDocCount(minDocCount int) *TermsAggregation {
- a.minDocCount = &minDocCount
- return a
-}
-
-func (a *TermsAggregation) ShardMinDocCount(shardMinDocCount int) *TermsAggregation {
- a.shardMinDocCount = &shardMinDocCount
- return a
-}
-
-func (a *TermsAggregation) Include(regexp string) *TermsAggregation {
- if a.includeExclude == nil {
- a.includeExclude = &TermsAggregationIncludeExclude{}
- }
- a.includeExclude.Include = regexp
- return a
-}
-
-func (a *TermsAggregation) IncludeValues(values ...interface{}) *TermsAggregation {
- if a.includeExclude == nil {
- a.includeExclude = &TermsAggregationIncludeExclude{}
- }
- a.includeExclude.IncludeValues = append(a.includeExclude.IncludeValues, values...)
- return a
-}
-
-func (a *TermsAggregation) Exclude(regexp string) *TermsAggregation {
- if a.includeExclude == nil {
- a.includeExclude = &TermsAggregationIncludeExclude{}
- }
- a.includeExclude.Exclude = regexp
- return a
-}
-
-func (a *TermsAggregation) ExcludeValues(values ...interface{}) *TermsAggregation {
- if a.includeExclude == nil {
- a.includeExclude = &TermsAggregationIncludeExclude{}
- }
- a.includeExclude.ExcludeValues = append(a.includeExclude.ExcludeValues, values...)
- return a
-}
-
-func (a *TermsAggregation) Partition(p int) *TermsAggregation {
- if a.includeExclude == nil {
- a.includeExclude = &TermsAggregationIncludeExclude{}
- }
- a.includeExclude.Partition = p
- return a
-}
-
-func (a *TermsAggregation) NumPartitions(n int) *TermsAggregation {
- if a.includeExclude == nil {
- a.includeExclude = &TermsAggregationIncludeExclude{}
- }
- a.includeExclude.NumPartitions = n
- return a
-}
-
-// ValueType can be string, long, or double.
-func (a *TermsAggregation) ValueType(valueType string) *TermsAggregation {
- a.valueType = valueType
- return a
-}
-
-func (a *TermsAggregation) Order(order string, asc bool) *TermsAggregation {
- a.order = append(a.order, TermsOrder{Field: order, Ascending: asc})
- return a
-}
-
-func (a *TermsAggregation) OrderByCount(asc bool) *TermsAggregation {
- // "order" : { "_count" : "asc" }
- a.order = append(a.order, TermsOrder{Field: "_count", Ascending: asc})
- return a
-}
-
-func (a *TermsAggregation) OrderByCountAsc() *TermsAggregation {
- return a.OrderByCount(true)
-}
-
-func (a *TermsAggregation) OrderByCountDesc() *TermsAggregation {
- return a.OrderByCount(false)
-}
-
-func (a *TermsAggregation) OrderByTerm(asc bool) *TermsAggregation {
- // "order" : { "_term" : "asc" }
- a.order = append(a.order, TermsOrder{Field: "_term", Ascending: asc})
- return a
-}
-
-func (a *TermsAggregation) OrderByTermAsc() *TermsAggregation {
- return a.OrderByTerm(true)
-}
-
-func (a *TermsAggregation) OrderByTermDesc() *TermsAggregation {
- return a.OrderByTerm(false)
-}
-
-// OrderByAggregation creates a bucket ordering strategy that sorts buckets
-// based on a single-valued metric sub-aggregation.
-func (a *TermsAggregation) OrderByAggregation(aggName string, asc bool) *TermsAggregation {
- // {
- // "aggs" : {
- // "genders" : {
- // "terms" : {
- // "field" : "gender",
- // "order" : { "avg_height" : "desc" }
- // },
- // "aggs" : {
- // "avg_height" : { "avg" : { "field" : "height" } }
- // }
- // }
- // }
- // }
- a.order = append(a.order, TermsOrder{Field: aggName, Ascending: asc})
- return a
-}
-
-// OrderByAggregationAndMetric creates a bucket ordering strategy that sorts
-// buckets based on a metric of a multi-valued sub-aggregation.
-func (a *TermsAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *TermsAggregation {
- // {
- // "aggs" : {
- // "genders" : {
- // "terms" : {
- // "field" : "gender",
- // "order" : { "height_stats.avg" : "desc" }
- // },
- // "aggs" : {
- // "height_stats" : { "stats" : { "field" : "height" } }
- // }
- // }
- // }
- // }
- a.order = append(a.order, TermsOrder{Field: aggName + "." + metric, Ascending: asc})
- return a
-}
-
-func (a *TermsAggregation) ExecutionHint(hint string) *TermsAggregation {
- a.executionHint = hint
- return a
-}
-
-// CollectionMode can be set to depth_first or breadth_first as of Elasticsearch 1.4.0.
-func (a *TermsAggregation) CollectionMode(collectionMode string) *TermsAggregation {
- a.collectionMode = collectionMode
- return a
-}
-
-func (a *TermsAggregation) ShowTermDocCountError(showTermDocCountError bool) *TermsAggregation {
- a.showTermDocCountError = &showTermDocCountError
- return a
-}
-
-func (a *TermsAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "genders" : {
- // "terms" : { "field" : "gender" }
- // }
- // }
- // }
- // This method returns only the { "terms" : { "field" : "gender" } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["terms"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.missing != nil {
- opts["missing"] = a.missing
- }
-
- // TermsBuilder
- if a.size != nil && *a.size >= 0 {
- opts["size"] = *a.size
- }
- if a.shardSize != nil && *a.shardSize >= 0 {
- opts["shard_size"] = *a.shardSize
- }
- if a.requiredSize != nil && *a.requiredSize >= 0 {
- opts["required_size"] = *a.requiredSize
- }
- if a.minDocCount != nil && *a.minDocCount >= 0 {
- opts["min_doc_count"] = *a.minDocCount
- }
- if a.shardMinDocCount != nil && *a.shardMinDocCount >= 0 {
- opts["shard_min_doc_count"] = *a.shardMinDocCount
- }
- if a.showTermDocCountError != nil {
- opts["show_term_doc_count_error"] = *a.showTermDocCountError
- }
- if a.collectionMode != "" {
- opts["collect_mode"] = a.collectionMode
- }
- if a.valueType != "" {
- opts["value_type"] = a.valueType
- }
- if len(a.order) > 0 {
- var orderSlice []interface{}
- for _, order := range a.order {
- src, err := order.Source()
- if err != nil {
- return nil, err
- }
- orderSlice = append(orderSlice, src)
- }
- opts["order"] = orderSlice
- }
- // Include/Exclude
- if ie := a.includeExclude; ie != nil {
- // Include
- if ie.Include != "" {
- opts["include"] = ie.Include
- } else if len(ie.IncludeValues) > 0 {
- opts["include"] = ie.IncludeValues
- } else if ie.NumPartitions > 0 {
- inc := make(map[string]interface{})
- inc["partition"] = ie.Partition
- inc["num_partitions"] = ie.NumPartitions
- opts["include"] = inc
- }
- // Exclude
- if ie.Exclude != "" {
- opts["exclude"] = ie.Exclude
- } else if len(ie.ExcludeValues) > 0 {
- opts["exclude"] = ie.ExcludeValues
- }
- }
-
- if a.executionHint != "" {
- opts["execution_hint"] = a.executionHint
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
-
-// TermsAggregationIncludeExclude allows for include/exclude in a TermsAggregation.
-type TermsAggregationIncludeExclude struct {
- Include string
- Exclude string
- IncludeValues []interface{}
- ExcludeValues []interface{}
- Partition int
- NumPartitions int
-}
-
-// TermsOrder specifies a single order field for a terms aggregation.
-type TermsOrder struct {
- Field string
- Ascending bool
-}
-
-// Source returns serializable JSON of the TermsOrder.
-func (order *TermsOrder) Source() (interface{}, error) {
- source := make(map[string]string)
- if order.Ascending {
- source[order.Field] = "asc"
- } else {
- source[order.Field] = "desc"
- }
- return source, nil
-}
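
A brief sketch of the terms builder above, ordering buckets by a metric sub-aggregation as the OrderByAggregation comment describes; the vendored import path is assumed and the field names ("gender", "height") are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Top 10 gender buckets, ordered by the avg_height sub-aggregation
	// in descending order.
	avgHeight := elastic.NewAvgAggregation().Field("height")
	agg := elastic.NewTermsAggregation().
		Field("gender").
		Size(10).
		OrderByAggregation("avg_height", false).
		SubAggregation("avg_height", avgHeight)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"aggregations":{"avg_height":{"avg":{"field":"height"}}},
	//  "terms":{"field":"gender","order":[{"avg_height":"desc"}],"size":10}}
}
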
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_terms_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_terms_test.go
deleted file mode 100644
index 351cbf63b..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_bucket_terms_test.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestTermsAggregation(t *testing.T) {
- agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc()
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms":{"field":"gender","order":[{"_term":"desc"}],"size":10}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermsAggregationWithSubAggregation(t *testing.T) {
- subAgg := NewAvgAggregation().Field("height")
- agg := NewTermsAggregation().Field("gender").Size(10).
- OrderByAggregation("avg_height", false)
- agg = agg.SubAggregation("avg_height", subAgg)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}}},"terms":{"field":"gender","order":[{"avg_height":"desc"}],"size":10}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermsAggregationWithMultipleSubAggregation(t *testing.T) {
- subAgg1 := NewAvgAggregation().Field("height")
- subAgg2 := NewAvgAggregation().Field("width")
- agg := NewTermsAggregation().Field("gender").Size(10).
- OrderByAggregation("avg_height", false)
- agg = agg.SubAggregation("avg_height", subAgg1)
- agg = agg.SubAggregation("avg_width", subAgg2)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}},"avg_width":{"avg":{"field":"width"}}},"terms":{"field":"gender","order":[{"avg_height":"desc"}],"size":10}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermsAggregationWithMetaData(t *testing.T) {
- agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc()
- agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"terms":{"field":"gender","order":[{"_term":"desc"}],"size":10}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermsAggregationWithMissing(t *testing.T) {
- agg := NewTermsAggregation().Field("gender").Size(10).Missing("n/a")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms":{"field":"gender","missing":"n/a","size":10}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermsAggregationWithIncludeExclude(t *testing.T) {
- agg := NewTermsAggregation().Field("tags").Include(".*sport.*").Exclude("water_.*")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms":{"exclude":"water_.*","field":"tags","include":".*sport.*"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermsAggregationWithIncludeExcludeValues(t *testing.T) {
- agg := NewTermsAggregation().Field("make").IncludeValues("mazda", "honda").ExcludeValues("rover", "jensen")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms":{"exclude":["rover","jensen"],"field":"make","include":["mazda","honda"]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermsAggregationWithPartitions(t *testing.T) {
- agg := NewTermsAggregation().Field("account_id").Partition(0).NumPartitions(20)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms":{"field":"account_id","include":{"num_partitions":20,"partition":0}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_matrix_stats.go b/vendor/github.com/olivere/elastic/search_aggs_matrix_stats.go
deleted file mode 100644
index 785f392b6..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_matrix_stats.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// MatrixStatsAggregation is a multi-field aggregation that computes numeric
-// statistics (count, mean, variance, skewness, kurtosis, covariance, and
-// correlation) over a set of document fields.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-stats-aggregation.html
-// for details.
-type MatrixStatsAggregation struct {
- fields []string
- missing interface{}
- format string
- valueType interface{}
- mode string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-// NewMatrixStatsAggregation initializes a new MatrixStatsAggregation.
-func NewMatrixStatsAggregation() *MatrixStatsAggregation {
- return &MatrixStatsAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *MatrixStatsAggregation) Fields(fields ...string) *MatrixStatsAggregation {
- a.fields = append(a.fields, fields...)
- return a
-}
-
-// Missing configures the value to use when documents miss a value.
-func (a *MatrixStatsAggregation) Missing(missing interface{}) *MatrixStatsAggregation {
- a.missing = missing
- return a
-}
-
-// Mode specifies how to operate. Valid values are: sum, avg, median, min, or max.
-func (a *MatrixStatsAggregation) Mode(mode string) *MatrixStatsAggregation {
- a.mode = mode
- return a
-}
-
-func (a *MatrixStatsAggregation) Format(format string) *MatrixStatsAggregation {
- a.format = format
- return a
-}
-
-func (a *MatrixStatsAggregation) ValueType(valueType interface{}) *MatrixStatsAggregation {
- a.valueType = valueType
- return a
-}
-
-func (a *MatrixStatsAggregation) SubAggregation(name string, subAggregation Aggregation) *MatrixStatsAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *MatrixStatsAggregation) Meta(metaData map[string]interface{}) *MatrixStatsAggregation {
- a.meta = metaData
- return a
-}
-
-// Source returns the JSON to serialize into the request, or an error.
-func (a *MatrixStatsAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "matrixstats" : {
- // "matrix_stats" : {
- // "fields" : ["poverty", "income"],
- // "missing": {"income": 50000},
- // "mode": "avg",
- // ...
- // }
- // }
- // }
- // }
- // This method returns only the { "matrix_stats" : { ... } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["matrix_stats"] = opts
-
- // MatrixStatsAggregationBuilder
- opts["fields"] = a.fields
- if a.missing != nil {
- opts["missing"] = a.missing
- }
- if a.format != "" {
- opts["format"] = a.format
- }
- if a.valueType != nil {
- opts["value_type"] = a.valueType
- }
- if a.mode != "" {
- opts["mode"] = a.mode
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
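
A minimal sketch of the matrix_stats builder above, mirroring the JSON example in the Source() comment; the vendored import path is assumed, and the field names and missing-value default ("poverty", "income", 50000) are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Matrix stats over two numeric fields, substituting a default income
	// for documents that miss the field.
	agg := elastic.NewMatrixStatsAggregation().
		Fields("poverty", "income").
		Missing(map[string]interface{}{"income": 50000}).
		Mode("avg")

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"matrix_stats":{"fields":["poverty","income"],"missing":{"income":50000},"mode":"avg"}}
}
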
diff --git a/vendor/github.com/olivere/elastic/search_aggs_matrix_stats_test.go b/vendor/github.com/olivere/elastic/search_aggs_matrix_stats_test.go
deleted file mode 100644
index 28138fe02..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_matrix_stats_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMatrixStatsAggregation(t *testing.T) {
- agg := NewMatrixStatsAggregation().
- Fields("poverty", "income").
- Missing(map[string]interface{}{
- "income": 50000,
- }).
- Mode("avg").
- Format("0000.0").
- ValueType("double")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"matrix_stats":{"fields":["poverty","income"],"format":"0000.0","missing":{"income":50000},"mode":"avg","value_type":"double"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMatrixStatsAggregationWithMetaData(t *testing.T) {
- agg := NewMatrixStatsAggregation().
- Fields("poverty", "income").
- Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"matrix_stats":{"fields":["poverty","income"]},"meta":{"name":"Oliver"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_avg.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_avg.go
deleted file mode 100644
index 2b764e065..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_avg.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// AvgAggregation is a single-value metrics aggregation that computes
-// the average of numeric values that are extracted from the
-// aggregated documents. These values can be extracted either from
-// specific numeric fields in the documents, or be generated by
-// a provided script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-avg-aggregation.html
-type AvgAggregation struct {
- field string
- script *Script
- format string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewAvgAggregation() *AvgAggregation {
- return &AvgAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *AvgAggregation) Field(field string) *AvgAggregation {
- a.field = field
- return a
-}
-
-func (a *AvgAggregation) Script(script *Script) *AvgAggregation {
- a.script = script
- return a
-}
-
-func (a *AvgAggregation) Format(format string) *AvgAggregation {
- a.format = format
- return a
-}
-
-func (a *AvgAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *AvgAggregation) Meta(metaData map[string]interface{}) *AvgAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *AvgAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "avg_grade" : { "avg" : { "field" : "grade" } }
- // }
- // }
- // This method returns only the { "avg" : { "field" : "grade" } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["avg"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
-
- if a.format != "" {
- opts["format"] = a.format
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
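
None of the removed tests exercises SubAggregation, so here is a minimal sketch of how the deleted builder nests a sub-aggregation and meta data into the generated request body. It uses only constructors and methods visible in this diff; the field name and the meta key are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Average grade plus a nested max sub-aggregation and response meta data.
	agg := elastic.NewAvgAggregation().
		Field("grade").
		Meta(map[string]interface{}{"unit": "points"}).
		SubAggregation("top_grade", elastic.NewMaxAggregation().Field("grade"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	// encoding/json sorts map keys, so this prints:
	// {"aggregations":{"top_grade":{"max":{"field":"grade"}}},"avg":{"field":"grade"},"meta":{"unit":"points"}}
	fmt.Println(string(data))
}
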
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_avg_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_avg_test.go
deleted file mode 100644
index 784ff45dd..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_avg_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestAvgAggregation(t *testing.T) {
- agg := NewAvgAggregation().Field("grade")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"avg":{"field":"grade"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestAvgAggregationWithFormat(t *testing.T) {
- agg := NewAvgAggregation().Field("grade").Format("000.0")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"avg":{"field":"grade","format":"000.0"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestAvgAggregationWithMetaData(t *testing.T) {
- agg := NewAvgAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"avg":{"field":"grade"},"meta":{"name":"Oliver"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality.go
deleted file mode 100644
index 3b999c849..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// CardinalityAggregation is a single-value metrics aggregation that
-// calculates an approximate count of distinct values.
-// Values can be extracted either from specific fields in the document
-// or generated by a script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-cardinality-aggregation.html
-type CardinalityAggregation struct {
- field string
- script *Script
- format string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- precisionThreshold *int64
- rehash *bool
-}
-
-func NewCardinalityAggregation() *CardinalityAggregation {
- return &CardinalityAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *CardinalityAggregation) Field(field string) *CardinalityAggregation {
- a.field = field
- return a
-}
-
-func (a *CardinalityAggregation) Script(script *Script) *CardinalityAggregation {
- a.script = script
- return a
-}
-
-func (a *CardinalityAggregation) Format(format string) *CardinalityAggregation {
- a.format = format
- return a
-}
-
-func (a *CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) *CardinalityAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *CardinalityAggregation) Meta(metaData map[string]interface{}) *CardinalityAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *CardinalityAggregation) PrecisionThreshold(threshold int64) *CardinalityAggregation {
- a.precisionThreshold = &threshold
- return a
-}
-
-func (a *CardinalityAggregation) Rehash(rehash bool) *CardinalityAggregation {
- a.rehash = &rehash
- return a
-}
-
-func (a *CardinalityAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "author_count" : {
- // "cardinality" : { "field" : "author" }
- // }
- // }
- // }
- // This method returns only the "cardinality" : { "field" : "author" } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["cardinality"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
-
- if a.format != "" {
- opts["format"] = a.format
- }
- if a.precisionThreshold != nil {
- opts["precision_threshold"] = *a.precisionThreshold
- }
- if a.rehash != nil {
- opts["rehash"] = *a.rehash
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
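
The removed cardinality tests cover only the field-based form; the script branch of Source() above works the same way. A sketch, assuming the package's NewScript constructor (defined in a file outside this diff) and an illustrative Painless expression; the exact JSON emitted for the script part comes from Script.Source() and can vary by version.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Approximate distinct count over a scripted value instead of a stored field.
	script := elastic.NewScript("doc['author.first'].value + ' ' + doc['author.last'].value")
	agg := elastic.NewCardinalityAggregation().
		Script(script).
		PrecisionThreshold(100)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	// Roughly: {"cardinality":{"precision_threshold":100,"script":{"source":"doc['author.first'].value + ..."}}}
	fmt.Println(string(data))
}
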
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go
deleted file mode 100644
index b5f8490b5..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestCardinalityAggregation(t *testing.T) {
- agg := NewCardinalityAggregation().Field("author.hash")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"cardinality":{"field":"author.hash"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCardinalityAggregationWithOptions(t *testing.T) {
- agg := NewCardinalityAggregation().Field("author.hash").PrecisionThreshold(100).Rehash(true)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"cardinality":{"field":"author.hash","precision_threshold":100,"rehash":true}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCardinalityAggregationWithFormat(t *testing.T) {
- agg := NewCardinalityAggregation().Field("author.hash").Format("00000")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"cardinality":{"field":"author.hash","format":"00000"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCardinalityAggregationWithMetaData(t *testing.T) {
- agg := NewCardinalityAggregation().Field("author.hash").Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"cardinality":{"field":"author.hash"},"meta":{"name":"Oliver"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go
deleted file mode 100644
index 4e0bbe65a..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// ExtendedStatsAggregation is a multi-value metrics aggregation that
-// computes stats over numeric values extracted from the aggregated documents.
-// These values can be extracted either from specific numeric fields
-// in the documents, or be generated by a provided script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-extendedstats-aggregation.html
-type ExtendedStatsAggregation struct {
- field string
- script *Script
- format string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewExtendedStatsAggregation() *ExtendedStatsAggregation {
- return &ExtendedStatsAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *ExtendedStatsAggregation) Field(field string) *ExtendedStatsAggregation {
- a.field = field
- return a
-}
-
-func (a *ExtendedStatsAggregation) Script(script *Script) *ExtendedStatsAggregation {
- a.script = script
- return a
-}
-
-func (a *ExtendedStatsAggregation) Format(format string) *ExtendedStatsAggregation {
- a.format = format
- return a
-}
-
-func (a *ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) *ExtendedStatsAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *ExtendedStatsAggregation) Meta(metaData map[string]interface{}) *ExtendedStatsAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *ExtendedStatsAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "grades_stats" : { "extended_stats" : { "field" : "grade" } }
- // }
- // }
- // This method returns only the { "extended_stats" : { "field" : "grade" } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["extended_stats"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.format != "" {
- opts["format"] = a.format
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go
deleted file mode 100644
index 76489630d..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestExtendedStatsAggregation(t *testing.T) {
- agg := NewExtendedStatsAggregation().Field("grade")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"extended_stats":{"field":"grade"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestExtendedStatsAggregationWithFormat(t *testing.T) {
- agg := NewExtendedStatsAggregation().Field("grade").Format("000.0")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"extended_stats":{"field":"grade","format":"000.0"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go
deleted file mode 100644
index 406f2d000..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// GeoBoundsAggregation is a metric aggregation that computes the
-// bounding box containing all geo_point values for a field.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-geobounds-aggregation.html
-type GeoBoundsAggregation struct {
- field string
- script *Script
- wrapLongitude *bool
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewGeoBoundsAggregation() *GeoBoundsAggregation {
- return &GeoBoundsAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *GeoBoundsAggregation) Field(field string) *GeoBoundsAggregation {
- a.field = field
- return a
-}
-
-func (a *GeoBoundsAggregation) Script(script *Script) *GeoBoundsAggregation {
- a.script = script
- return a
-}
-
-func (a *GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) *GeoBoundsAggregation {
- a.wrapLongitude = &wrapLongitude
- return a
-}
-
-func (a *GeoBoundsAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoBoundsAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *GeoBoundsAggregation) Meta(metaData map[string]interface{}) *GeoBoundsAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *GeoBoundsAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "query" : {
- // "match" : { "business_type" : "shop" }
- // },
- // "aggs" : {
- // "viewport" : {
- // "geo_bounds" : {
- //	            "field" : "location",
- // "wrap_longitude" : "true"
- // }
- // }
- // }
- // }
- //
- // This method returns only the { "geo_bounds" : { ... } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["geo_bounds"] = opts
-
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.wrapLongitude != nil {
- opts["wrap_longitude"] = *a.wrapLongitude
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go
deleted file mode 100644
index ea713c604..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestGeoBoundsAggregation(t *testing.T) {
- agg := NewGeoBoundsAggregation().Field("location")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_bounds":{"field":"location"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoBoundsAggregationWithWrapLongitude(t *testing.T) {
- agg := NewGeoBoundsAggregation().Field("location").WrapLongitude(true)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_bounds":{"field":"location","wrap_longitude":true}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoBoundsAggregationWithMetaData(t *testing.T) {
- agg := NewGeoBoundsAggregation().Field("location").Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_bounds":{"field":"location"},"meta":{"name":"Oliver"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_max.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_max.go
deleted file mode 100644
index acdfa14a8..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_max.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// MaxAggregation is a single-value metrics aggregation that keeps track of and
-// returns the maximum value among the numeric values extracted from
-// the aggregated documents. These values can be extracted either from
-// specific numeric fields in the documents, or be generated by
-// a provided script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-max-aggregation.html
-type MaxAggregation struct {
- field string
- script *Script
- format string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewMaxAggregation() *MaxAggregation {
- return &MaxAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *MaxAggregation) Field(field string) *MaxAggregation {
- a.field = field
- return a
-}
-
-func (a *MaxAggregation) Script(script *Script) *MaxAggregation {
- a.script = script
- return a
-}
-
-func (a *MaxAggregation) Format(format string) *MaxAggregation {
- a.format = format
- return a
-}
-
-func (a *MaxAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *MaxAggregation) Meta(metaData map[string]interface{}) *MaxAggregation {
- a.meta = metaData
- return a
-}
-func (a *MaxAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "max_price" : { "max" : { "field" : "price" } }
- // }
- // }
- // This method returns only the { "max" : { "field" : "price" } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["max"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.format != "" {
- opts["format"] = a.format
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_max_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_max_test.go
deleted file mode 100644
index 773cc2e4b..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_max_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMaxAggregation(t *testing.T) {
- agg := NewMaxAggregation().Field("price")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"max":{"field":"price"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMaxAggregationWithFormat(t *testing.T) {
- agg := NewMaxAggregation().Field("price").Format("00000.00")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"max":{"field":"price","format":"00000.00"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMaxAggregationWithMetaData(t *testing.T) {
- agg := NewMaxAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"max":{"field":"price"},"meta":{"name":"Oliver"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_min.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_min.go
deleted file mode 100644
index af63585da..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_min.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// MinAggregation is a single-value metrics aggregation that keeps track of and
-// returns the minimum value among numeric values extracted from the
-// aggregated documents. These values can be extracted either from
-// specific numeric fields in the documents, or be generated by a
-// provided script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-min-aggregation.html
-type MinAggregation struct {
- field string
- script *Script
- format string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewMinAggregation() *MinAggregation {
- return &MinAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *MinAggregation) Field(field string) *MinAggregation {
- a.field = field
- return a
-}
-
-func (a *MinAggregation) Script(script *Script) *MinAggregation {
- a.script = script
- return a
-}
-
-func (a *MinAggregation) Format(format string) *MinAggregation {
- a.format = format
- return a
-}
-
-func (a *MinAggregation) SubAggregation(name string, subAggregation Aggregation) *MinAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *MinAggregation) Meta(metaData map[string]interface{}) *MinAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *MinAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "min_price" : { "min" : { "field" : "price" } }
- // }
- // }
- // This method returns only the { "min" : { "field" : "price" } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["min"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.format != "" {
- opts["format"] = a.format
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_min_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_min_test.go
deleted file mode 100644
index fcde3817c..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_min_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMinAggregation(t *testing.T) {
- agg := NewMinAggregation().Field("price")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"min":{"field":"price"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMinAggregationWithFormat(t *testing.T) {
- agg := NewMinAggregation().Field("price").Format("00000.00")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"min":{"field":"price","format":"00000.00"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMinAggregationWithMetaData(t *testing.T) {
- agg := NewMinAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"min":{"field":"price"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go
deleted file mode 100644
index 674fc41f9..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// PercentileRanksAggregation is a multi-value metrics aggregation that computes one or more percentile ranks over numeric values extracted from the aggregated documents.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-percentile-rank-aggregation.html
-type PercentileRanksAggregation struct {
- field string
- script *Script
- format string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- values []float64
- compression *float64
- estimator string
-}
-
-func NewPercentileRanksAggregation() *PercentileRanksAggregation {
- return &PercentileRanksAggregation{
- subAggregations: make(map[string]Aggregation),
- values: make([]float64, 0),
- }
-}
-
-func (a *PercentileRanksAggregation) Field(field string) *PercentileRanksAggregation {
- a.field = field
- return a
-}
-
-func (a *PercentileRanksAggregation) Script(script *Script) *PercentileRanksAggregation {
- a.script = script
- return a
-}
-
-func (a *PercentileRanksAggregation) Format(format string) *PercentileRanksAggregation {
- a.format = format
- return a
-}
-
-func (a *PercentileRanksAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentileRanksAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *PercentileRanksAggregation) Meta(metaData map[string]interface{}) *PercentileRanksAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *PercentileRanksAggregation) Values(values ...float64) *PercentileRanksAggregation {
- a.values = append(a.values, values...)
- return a
-}
-
-func (a *PercentileRanksAggregation) Compression(compression float64) *PercentileRanksAggregation {
- a.compression = &compression
- return a
-}
-
-func (a *PercentileRanksAggregation) Estimator(estimator string) *PercentileRanksAggregation {
- a.estimator = estimator
- return a
-}
-
-func (a *PercentileRanksAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "load_time_outlier" : {
- // "percentile_ranks" : {
- //	        "field" : "load_time",
- // "values" : [15, 30]
- // }
- // }
- // }
- // }
- // This method returns only the
- // { "percentile_ranks" : { "field" : "load_time", "values" : [15, 30] } }
- // part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["percentile_ranks"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.format != "" {
- opts["format"] = a.format
- }
- if len(a.values) > 0 {
- opts["values"] = a.values
- }
- if a.compression != nil {
- opts["compression"] = *a.compression
- }
- if a.estimator != "" {
- opts["estimator"] = a.estimator
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go
deleted file mode 100644
index a4bac02b5..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestPercentileRanksAggregation(t *testing.T) {
- agg := NewPercentileRanksAggregation().Field("load_time")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"percentile_ranks":{"field":"load_time"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPercentileRanksAggregationWithCustomValues(t *testing.T) {
- agg := NewPercentileRanksAggregation().Field("load_time").Values(15, 30)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"percentile_ranks":{"field":"load_time","values":[15,30]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPercentileRanksAggregationWithFormat(t *testing.T) {
- agg := NewPercentileRanksAggregation().Field("load_time").Format("000.0")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"percentile_ranks":{"field":"load_time","format":"000.0"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPercentileRanksAggregationWithMetaData(t *testing.T) {
- agg := NewPercentileRanksAggregation().Field("load_time").Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"percentile_ranks":{"field":"load_time"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles.go
deleted file mode 100644
index a1d78c8f2..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// PercentilesAggregation is a multi-value metrics aggregation that computes one or more percentiles over numeric values extracted from the aggregated documents.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-percentile-aggregation.html
-type PercentilesAggregation struct {
- field string
- script *Script
- format string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- percentiles []float64
- compression *float64
- estimator string
-}
-
-func NewPercentilesAggregation() *PercentilesAggregation {
- return &PercentilesAggregation{
- subAggregations: make(map[string]Aggregation),
- percentiles: make([]float64, 0),
- }
-}
-
-func (a *PercentilesAggregation) Field(field string) *PercentilesAggregation {
- a.field = field
- return a
-}
-
-func (a *PercentilesAggregation) Script(script *Script) *PercentilesAggregation {
- a.script = script
- return a
-}
-
-func (a *PercentilesAggregation) Format(format string) *PercentilesAggregation {
- a.format = format
- return a
-}
-
-func (a *PercentilesAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentilesAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *PercentilesAggregation) Meta(metaData map[string]interface{}) *PercentilesAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *PercentilesAggregation) Percentiles(percentiles ...float64) *PercentilesAggregation {
- a.percentiles = append(a.percentiles, percentiles...)
- return a
-}
-
-func (a *PercentilesAggregation) Compression(compression float64) *PercentilesAggregation {
- a.compression = &compression
- return a
-}
-
-func (a *PercentilesAggregation) Estimator(estimator string) *PercentilesAggregation {
- a.estimator = estimator
- return a
-}
-
-func (a *PercentilesAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "load_time_outlier" : {
- // "percentiles" : {
- // "field" : "load_time"
- // }
- // }
- // }
- // }
- // This method returns only the
- // { "percentiles" : { "field" : "load_time" } }
- // part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["percentiles"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.format != "" {
- opts["format"] = a.format
- }
- if len(a.percentiles) > 0 {
- opts["percents"] = a.percentiles
- }
- if a.compression != nil {
- opts["compression"] = *a.compression
- }
- if a.estimator != "" {
- opts["estimator"] = a.estimator
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
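
Compression is the one option of the removed PercentilesAggregation that no deleted test covers; it trades memory for accuracy of the underlying t-digest. A sketch of how it lands in the request body; the field name and values are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Tail latencies with a larger t-digest for more accurate high percentiles.
	agg := elastic.NewPercentilesAggregation().
		Field("load_time").
		Percentiles(95, 99, 99.9).
		Compression(200)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	// Prints: {"percentiles":{"compression":200,"field":"load_time","percents":[95,99,99.9]}}
	fmt.Println(string(data))
}
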
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go
deleted file mode 100644
index 93df1dd29..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestPercentilesAggregation(t *testing.T) {
- agg := NewPercentilesAggregation().Field("price")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"percentiles":{"field":"price"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPercentilesAggregationWithCustomPercents(t *testing.T) {
- agg := NewPercentilesAggregation().Field("price").Percentiles(0.2, 0.5, 0.9)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"percentiles":{"field":"price","percents":[0.2,0.5,0.9]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPercentilesAggregationWithFormat(t *testing.T) {
- agg := NewPercentilesAggregation().Field("price").Format("00000.00")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"percentiles":{"field":"price","format":"00000.00"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPercentilesAggregationWithMetaData(t *testing.T) {
- agg := NewPercentilesAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"percentiles":{"field":"price"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_stats.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_stats.go
deleted file mode 100644
index b9bbe7cff..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_stats.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// StatsAggregation is a multi-value metrics aggregation that computes stats
-// over numeric values extracted from the aggregated documents.
-// These values can be extracted either from specific numeric fields
-// in the documents, or be generated by a provided script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-stats-aggregation.html
-type StatsAggregation struct {
- field string
- script *Script
- format string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewStatsAggregation() *StatsAggregation {
- return &StatsAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *StatsAggregation) Field(field string) *StatsAggregation {
- a.field = field
- return a
-}
-
-func (a *StatsAggregation) Script(script *Script) *StatsAggregation {
- a.script = script
- return a
-}
-
-func (a *StatsAggregation) Format(format string) *StatsAggregation {
- a.format = format
- return a
-}
-
-func (a *StatsAggregation) SubAggregation(name string, subAggregation Aggregation) *StatsAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *StatsAggregation) Meta(metaData map[string]interface{}) *StatsAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *StatsAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "grades_stats" : { "stats" : { "field" : "grade" } }
- // }
- // }
- // This method returns only the { "stats" : { "field" : "grade" } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["stats"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.format != "" {
- opts["format"] = a.format
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_stats_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_stats_test.go
deleted file mode 100644
index 5cff372d4..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_stats_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestStatsAggregation(t *testing.T) {
- agg := NewStatsAggregation().Field("grade")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"stats":{"field":"grade"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestStatsAggregationWithFormat(t *testing.T) {
- agg := NewStatsAggregation().Field("grade").Format("0000.0")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"stats":{"field":"grade","format":"0000.0"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestStatsAggregationWithMetaData(t *testing.T) {
- agg := NewStatsAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"stats":{"field":"grade"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_sum.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_sum.go
deleted file mode 100644
index e1c07c9c1..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_sum.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// SumAggregation is a single-value metrics aggregation that sums up
-// numeric values that are extracted from the aggregated documents.
-// These values can be extracted either from specific numeric fields
-// in the documents, or be generated by a provided script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-sum-aggregation.html
-type SumAggregation struct {
- field string
- script *Script
- format string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewSumAggregation() *SumAggregation {
- return &SumAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *SumAggregation) Field(field string) *SumAggregation {
- a.field = field
- return a
-}
-
-func (a *SumAggregation) Script(script *Script) *SumAggregation {
- a.script = script
- return a
-}
-
-func (a *SumAggregation) Format(format string) *SumAggregation {
- a.format = format
- return a
-}
-
-func (a *SumAggregation) SubAggregation(name string, subAggregation Aggregation) *SumAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *SumAggregation) Meta(metaData map[string]interface{}) *SumAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *SumAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "intraday_return" : { "sum" : { "field" : "change" } }
- // }
- // }
- // This method returns only the { "sum" : { "field" : "change" } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["sum"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.format != "" {
- opts["format"] = a.format
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_sum_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_sum_test.go
deleted file mode 100644
index ff0e42545..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_sum_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSumAggregation(t *testing.T) {
- agg := NewSumAggregation().Field("price")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"sum":{"field":"price"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSumAggregationWithFormat(t *testing.T) {
- agg := NewSumAggregation().Field("price").Format("00000.00")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"sum":{"field":"price","format":"00000.00"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSumAggregationWithMetaData(t *testing.T) {
- agg := NewSumAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"sum":{"field":"price"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits.go
deleted file mode 100644
index 2b181895e..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// TopHitsAggregation keeps track of the most relevant document
-// being aggregated. This aggregator is intended to be used as a
-// sub aggregator, so that the top matching documents
-// can be aggregated per bucket.
-//
-// It can effectively be used to group result sets by certain fields via
-// a bucket aggregator. One or more bucket aggregators determine the
-// properties by which a result set is sliced into buckets.
-//
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-top-hits-aggregation.html
-type TopHitsAggregation struct {
- searchSource *SearchSource
-}
-
-func NewTopHitsAggregation() *TopHitsAggregation {
- return &TopHitsAggregation{
- searchSource: NewSearchSource(),
- }
-}
-
-func (a *TopHitsAggregation) From(from int) *TopHitsAggregation {
- a.searchSource = a.searchSource.From(from)
- return a
-}
-
-func (a *TopHitsAggregation) Size(size int) *TopHitsAggregation {
- a.searchSource = a.searchSource.Size(size)
- return a
-}
-
-func (a *TopHitsAggregation) TrackScores(trackScores bool) *TopHitsAggregation {
- a.searchSource = a.searchSource.TrackScores(trackScores)
- return a
-}
-
-func (a *TopHitsAggregation) Explain(explain bool) *TopHitsAggregation {
- a.searchSource = a.searchSource.Explain(explain)
- return a
-}
-
-func (a *TopHitsAggregation) Version(version bool) *TopHitsAggregation {
- a.searchSource = a.searchSource.Version(version)
- return a
-}
-
-func (a *TopHitsAggregation) NoStoredFields() *TopHitsAggregation {
- a.searchSource = a.searchSource.NoStoredFields()
- return a
-}
-
-func (a *TopHitsAggregation) FetchSource(fetchSource bool) *TopHitsAggregation {
- a.searchSource = a.searchSource.FetchSource(fetchSource)
- return a
-}
-
-func (a *TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceContext) *TopHitsAggregation {
- a.searchSource = a.searchSource.FetchSourceContext(fetchSourceContext)
- return a
-}
-
-func (a *TopHitsAggregation) DocvalueFields(docvalueFields ...string) *TopHitsAggregation {
- a.searchSource = a.searchSource.DocvalueFields(docvalueFields...)
- return a
-}
-
-func (a *TopHitsAggregation) DocvalueField(docvalueField string) *TopHitsAggregation {
- a.searchSource = a.searchSource.DocvalueField(docvalueField)
- return a
-}
-
-func (a *TopHitsAggregation) ScriptFields(scriptFields ...*ScriptField) *TopHitsAggregation {
- a.searchSource = a.searchSource.ScriptFields(scriptFields...)
- return a
-}
-
-func (a *TopHitsAggregation) ScriptField(scriptField *ScriptField) *TopHitsAggregation {
- a.searchSource = a.searchSource.ScriptField(scriptField)
- return a
-}
-
-func (a *TopHitsAggregation) Sort(field string, ascending bool) *TopHitsAggregation {
- a.searchSource = a.searchSource.Sort(field, ascending)
- return a
-}
-
-func (a *TopHitsAggregation) SortWithInfo(info SortInfo) *TopHitsAggregation {
- a.searchSource = a.searchSource.SortWithInfo(info)
- return a
-}
-
-func (a *TopHitsAggregation) SortBy(sorter ...Sorter) *TopHitsAggregation {
- a.searchSource = a.searchSource.SortBy(sorter...)
- return a
-}
-
-func (a *TopHitsAggregation) Highlight(highlight *Highlight) *TopHitsAggregation {
- a.searchSource = a.searchSource.Highlight(highlight)
- return a
-}
-
-func (a *TopHitsAggregation) Highlighter() *Highlight {
- return a.searchSource.Highlighter()
-}
-
-func (a *TopHitsAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs": {
- // "top_tag_hits": {
- // "top_hits": {
- // "sort": [
- // {
- // "last_activity_date": {
- // "order": "desc"
- // }
- // }
- // ],
- // "_source": {
- // "include": [
- // "title"
- // ]
- // },
- // "size" : 1
- // }
- // }
- // }
- // }
- // This method returns only the { "top_hits" : { ... } } part.
-
- source := make(map[string]interface{})
- src, err := a.searchSource.Source()
- if err != nil {
- return nil, err
- }
- source["top_hits"] = src
- return source, nil
-}
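
The doc comment above says TopHitsAggregation is meant to run as a sub-aggregator, but the removed test only builds it standalone. A sketch of the bucketed use it describes, assuming the package's NewTermsAggregation (defined outside this diff); the field names mirror the example in the comment and are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Keep the single most recently active document per tag bucket.
	topHits := elastic.NewTopHitsAggregation().
		Sort("last_activity_date", false). // descending
		FetchSourceContext(elastic.NewFetchSourceContext(true).Include("title")).
		Size(1)

	byTag := elastic.NewTermsAggregation().
		Field("tags").
		SubAggregation("top_tag_hits", topHits)

	src, err := byTag.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	// A "terms" bucket per tag, each carrying a nested "top_hits" block.
	fmt.Println(string(data))
}
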
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go
deleted file mode 100644
index 861f079fe..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestTopHitsAggregation(t *testing.T) {
- fsc := NewFetchSourceContext(true).Include("title")
- agg := NewTopHitsAggregation().
- Sort("last_activity_date", false).
- FetchSourceContext(fsc).
- Size(1)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"top_hits":{"_source":{"includes":["title"]},"size":1,"sort":[{"last_activity_date":{"order":"desc"}}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_value_count.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_value_count.go
deleted file mode 100644
index d56f1f873..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_value_count.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// ValueCountAggregation is a single-value metrics aggregation that counts
-// the number of values that are extracted from the aggregated documents.
-// These values can be extracted either from specific fields in the documents,
-// or be generated by a provided script. Typically, this aggregator will be
-// used in conjunction with other single-value aggregations.
-// For example, when computing the avg one might be interested in the
-// number of values the average is computed over.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-valuecount-aggregation.html
-type ValueCountAggregation struct {
- field string
- script *Script
- format string
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-func NewValueCountAggregation() *ValueCountAggregation {
- return &ValueCountAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-func (a *ValueCountAggregation) Field(field string) *ValueCountAggregation {
- a.field = field
- return a
-}
-
-func (a *ValueCountAggregation) Script(script *Script) *ValueCountAggregation {
- a.script = script
- return a
-}
-
-func (a *ValueCountAggregation) Format(format string) *ValueCountAggregation {
- a.format = format
- return a
-}
-
-func (a *ValueCountAggregation) SubAggregation(name string, subAggregation Aggregation) *ValueCountAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *ValueCountAggregation) Meta(metaData map[string]interface{}) *ValueCountAggregation {
- a.meta = metaData
- return a
-}
-
-func (a *ValueCountAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs" : {
- // "grades_count" : { "value_count" : { "field" : "grade" } }
- // }
- // }
- // This method returns only the { "value_count" : { "field" : "grade" } } part.
-
- source := make(map[string]interface{})
- opts := make(map[string]interface{})
- source["value_count"] = opts
-
- // ValuesSourceAggregationBuilder
- if a.field != "" {
- opts["field"] = a.field
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- opts["script"] = src
- }
- if a.format != "" {
- opts["format"] = a.format
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
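
A short sketch of the typical pairing described above: value_count next to the metric whose input count it reports. It assumes the package's NewTermsAggregation and NewAvgAggregation builders (defined elsewhere in this library) and hypothetical "class"/"grade" fields.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/olivere/elastic"
)

func main() {
    // Per "class" bucket: the average grade plus how many grade values
    // that average was computed over.
    agg := elastic.NewTermsAggregation().Field("class").
        SubAggregation("avg_grade", elastic.NewAvgAggregation().Field("grade")).
        SubAggregation("grades_count", elastic.NewValueCountAggregation().Field("grade"))

    src, err := agg.Source()
    if err != nil {
        panic(err)
    }
    out, _ := json.Marshal(src)
    fmt.Println(string(out))
}
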
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go
deleted file mode 100644
index 18d2ba119..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestValueCountAggregation(t *testing.T) {
- agg := NewValueCountAggregation().Field("grade")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"value_count":{"field":"grade"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestValueCountAggregationWithFormat(t *testing.T) {
- // Format comes with 1.5.0+
- agg := NewValueCountAggregation().Field("grade").Format("0000.0")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"value_count":{"field":"grade","format":"0000.0"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestValueCountAggregationWithMetaData(t *testing.T) {
- agg := NewValueCountAggregation().Field("grade")
- agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"meta":{"name":"Oliver"},"value_count":{"field":"grade"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go
deleted file mode 100644
index f37a9bdb8..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// AvgBucketAggregation is a sibling pipeline aggregation which calculates
-// the (mean) average value of a specified metric in a sibling aggregation.
-// The specified metric must be numeric and the sibling aggregation must
-// be a multi-bucket aggregation.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-avg-bucket-aggregation.html
-type AvgBucketAggregation struct {
- format string
- gapPolicy string
-
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- bucketsPaths []string
-}
-
-// NewAvgBucketAggregation creates and initializes a new AvgBucketAggregation.
-func NewAvgBucketAggregation() *AvgBucketAggregation {
- return &AvgBucketAggregation{
- subAggregations: make(map[string]Aggregation),
- bucketsPaths: make([]string, 0),
- }
-}
-
-func (a *AvgBucketAggregation) Format(format string) *AvgBucketAggregation {
- a.format = format
- return a
-}
-
-// GapPolicy defines what should be done when a gap in the series is discovered.
-// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
-func (a *AvgBucketAggregation) GapPolicy(gapPolicy string) *AvgBucketAggregation {
- a.gapPolicy = gapPolicy
- return a
-}
-
-// GapInsertZeros inserts zeros for gaps in the series.
-func (a *AvgBucketAggregation) GapInsertZeros() *AvgBucketAggregation {
- a.gapPolicy = "insert_zeros"
- return a
-}
-
-// GapSkip skips gaps in the series.
-func (a *AvgBucketAggregation) GapSkip() *AvgBucketAggregation {
- a.gapPolicy = "skip"
- return a
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (a *AvgBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgBucketAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *AvgBucketAggregation) Meta(metaData map[string]interface{}) *AvgBucketAggregation {
- a.meta = metaData
- return a
-}
-
-// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
-func (a *AvgBucketAggregation) BucketsPath(bucketsPaths ...string) *AvgBucketAggregation {
- a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
- return a
-}
-
-func (a *AvgBucketAggregation) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["avg_bucket"] = params
-
- if a.format != "" {
- params["format"] = a.format
- }
- if a.gapPolicy != "" {
- params["gap_policy"] = a.gapPolicy
- }
-
- // Add buckets paths
- switch len(a.bucketsPaths) {
- case 0:
- case 1:
- params["buckets_path"] = a.bucketsPaths[0]
- default:
- params["buckets_path"] = a.bucketsPaths
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
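
Because avg_bucket is a sibling pipeline aggregation, it sits at the same level as the multi-bucket aggregation it reads from, not inside it. A sketch under that assumption, using the package's NewSearchSource, NewDateHistogramAggregation and NewSumAggregation builders (defined elsewhere in this library) and hypothetical "date"/"price" fields.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/olivere/elastic"
)

func main() {
    // Monthly sales buckets, each with a "sales" sum ...
    histo := elastic.NewDateHistogramAggregation().Field("date").Interval("month").
        SubAggregation("sales", elastic.NewSumAggregation().Field("price"))
    // ... and, as a sibling, the average of those monthly sums.
    avg := elastic.NewAvgBucketAggregation().BucketsPath("sales_per_month>sales")

    ss := elastic.NewSearchSource().Size(0).
        Aggregation("sales_per_month", histo).
        Aggregation("avg_monthly_sales", avg)

    body, err := ss.Source()
    if err != nil {
        panic(err)
    }
    out, _ := json.MarshalIndent(body, "", "  ")
    fmt.Println(string(out))
}
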
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go
deleted file mode 100644
index 019b8f1ad..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestAvgBucketAggregation(t *testing.T) {
- agg := NewAvgBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"avg_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go
deleted file mode 100644
index 34e356964..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// BucketScriptAggregation is a parent pipeline aggregation which executes
-// a script which can perform per bucket computations on specified metrics
-// in the parent multi-bucket aggregation. The specified metric must be
-// numeric and the script must return a numeric value.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-bucket-script-aggregation.html
-type BucketScriptAggregation struct {
- format string
- gapPolicy string
- script *Script
-
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- bucketsPathsMap map[string]string
-}
-
-// NewBucketScriptAggregation creates and initializes a new BucketScriptAggregation.
-func NewBucketScriptAggregation() *BucketScriptAggregation {
- return &BucketScriptAggregation{
- subAggregations: make(map[string]Aggregation),
- bucketsPathsMap: make(map[string]string),
- }
-}
-
-func (a *BucketScriptAggregation) Format(format string) *BucketScriptAggregation {
- a.format = format
- return a
-}
-
-// GapPolicy defines what should be done when a gap in the series is discovered.
-// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
-func (a *BucketScriptAggregation) GapPolicy(gapPolicy string) *BucketScriptAggregation {
- a.gapPolicy = gapPolicy
- return a
-}
-
-// GapInsertZeros inserts zeros for gaps in the series.
-func (a *BucketScriptAggregation) GapInsertZeros() *BucketScriptAggregation {
- a.gapPolicy = "insert_zeros"
- return a
-}
-
-// GapSkip skips gaps in the series.
-func (a *BucketScriptAggregation) GapSkip() *BucketScriptAggregation {
- a.gapPolicy = "skip"
- return a
-}
-
-// Script is the script to run.
-func (a *BucketScriptAggregation) Script(script *Script) *BucketScriptAggregation {
- a.script = script
- return a
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (a *BucketScriptAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketScriptAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *BucketScriptAggregation) Meta(metaData map[string]interface{}) *BucketScriptAggregation {
- a.meta = metaData
- return a
-}
-
-// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator.
-func (a *BucketScriptAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketScriptAggregation {
- a.bucketsPathsMap = bucketsPathsMap
- return a
-}
-
-// AddBucketsPath adds a bucket path to use for this pipeline aggregator.
-func (a *BucketScriptAggregation) AddBucketsPath(name, path string) *BucketScriptAggregation {
- if a.bucketsPathsMap == nil {
- a.bucketsPathsMap = make(map[string]string)
- }
- a.bucketsPathsMap[name] = path
- return a
-}
-
-func (a *BucketScriptAggregation) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["bucket_script"] = params
-
- if a.format != "" {
- params["format"] = a.format
- }
- if a.gapPolicy != "" {
- params["gap_policy"] = a.gapPolicy
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- params["script"] = src
- }
-
- // Add buckets paths
- if len(a.bucketsPathsMap) > 0 {
- params["buckets_path"] = a.bucketsPathsMap
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
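
A sketch of the per-bucket computation described above: a bucket_script nested inside a date_histogram, dividing two sibling metrics. It assumes the package's NewDateHistogramAggregation, NewSumAggregation, NewFilterAggregation and NewTermQuery builders (defined elsewhere in this library); the field names and the Painless expression are hypothetical.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/olivere/elastic"
)

func main() {
    // Per month: total sales, t-shirt sales, and the t-shirt share in percent,
    // computed by a bucket_script over the two sibling metrics.
    histo := elastic.NewDateHistogramAggregation().Field("date").Interval("month").
        SubAggregation("total_sales", elastic.NewSumAggregation().Field("price")).
        SubAggregation("t-shirts",
            elastic.NewFilterAggregation().Filter(elastic.NewTermQuery("type", "t-shirt")).
                SubAggregation("sales", elastic.NewSumAggregation().Field("price"))).
        SubAggregation("t-shirt-percentage",
            elastic.NewBucketScriptAggregation().
                AddBucketsPath("tShirtSales", "t-shirts>sales").
                AddBucketsPath("totalSales", "total_sales").
                Script(elastic.NewScript("params.tShirtSales / params.totalSales * 100")))

    src, err := histo.Source()
    if err != nil {
        panic(err)
    }
    out, _ := json.MarshalIndent(src, "", "  ")
    fmt.Println(string(out))
}
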
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go
deleted file mode 100644
index 3c101c706..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestBucketScriptAggregation(t *testing.T) {
- agg := NewBucketScriptAggregation().
- AddBucketsPath("tShirtSales", "t-shirts>sales").
- AddBucketsPath("totalSales", "total_sales").
- Script(NewScript("tShirtSales / totalSales * 100"))
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"bucket_script":{"buckets_path":{"tShirtSales":"t-shirts\u003esales","totalSales":"total_sales"},"script":{"source":"tShirtSales / totalSales * 100"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go
deleted file mode 100644
index 233414d70..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// BucketSelectorAggregation is a parent pipeline aggregation which
-// determines whether the current bucket will be retained in the parent
-// multi-bucket aggregation. The specified metric must be numeric and
-// the script must return a boolean value. If the script language is
-// expression then a numeric return value is permitted. In this case 0.0
-// will be evaluated as false and all other values will evaluate to true.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-bucket-selector-aggregation.html
-type BucketSelectorAggregation struct {
- format string
- gapPolicy string
- script *Script
-
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- bucketsPathsMap map[string]string
-}
-
-// NewBucketSelectorAggregation creates and initializes a new BucketSelectorAggregation.
-func NewBucketSelectorAggregation() *BucketSelectorAggregation {
- return &BucketSelectorAggregation{
- subAggregations: make(map[string]Aggregation),
- bucketsPathsMap: make(map[string]string),
- }
-}
-
-func (a *BucketSelectorAggregation) Format(format string) *BucketSelectorAggregation {
- a.format = format
- return a
-}
-
-// GapPolicy defines what should be done when a gap in the series is discovered.
-// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
-func (a *BucketSelectorAggregation) GapPolicy(gapPolicy string) *BucketSelectorAggregation {
- a.gapPolicy = gapPolicy
- return a
-}
-
-// GapInsertZeros inserts zeros for gaps in the series.
-func (a *BucketSelectorAggregation) GapInsertZeros() *BucketSelectorAggregation {
- a.gapPolicy = "insert_zeros"
- return a
-}
-
-// GapSkip skips gaps in the series.
-func (a *BucketSelectorAggregation) GapSkip() *BucketSelectorAggregation {
- a.gapPolicy = "skip"
- return a
-}
-
-// Script is the script to run.
-func (a *BucketSelectorAggregation) Script(script *Script) *BucketSelectorAggregation {
- a.script = script
- return a
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (a *BucketSelectorAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketSelectorAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *BucketSelectorAggregation) Meta(metaData map[string]interface{}) *BucketSelectorAggregation {
- a.meta = metaData
- return a
-}
-
-// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator.
-func (a *BucketSelectorAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketSelectorAggregation {
- a.bucketsPathsMap = bucketsPathsMap
- return a
-}
-
-// AddBucketsPath adds a bucket path to use for this pipeline aggregator.
-func (a *BucketSelectorAggregation) AddBucketsPath(name, path string) *BucketSelectorAggregation {
- if a.bucketsPathsMap == nil {
- a.bucketsPathsMap = make(map[string]string)
- }
- a.bucketsPathsMap[name] = path
- return a
-}
-
-func (a *BucketSelectorAggregation) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["bucket_selector"] = params
-
- if a.format != "" {
- params["format"] = a.format
- }
- if a.gapPolicy != "" {
- params["gap_policy"] = a.gapPolicy
- }
- if a.script != nil {
- src, err := a.script.Source()
- if err != nil {
- return nil, err
- }
- params["script"] = src
- }
-
- // Add buckets paths
- if len(a.bucketsPathsMap) > 0 {
- params["buckets_path"] = a.bucketsPathsMap
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
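
A sketch of using bucket_selector to drop buckets below a threshold, nested inside its parent histogram. The NewDateHistogramAggregation and NewSumAggregation builders come from elsewhere in this library; field names and the script are hypothetical.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/olivere/elastic"
)

func main() {
    // Keep only the monthly buckets whose total sales exceed 200.
    histo := elastic.NewDateHistogramAggregation().Field("date").Interval("month").
        SubAggregation("total_sales", elastic.NewSumAggregation().Field("price")).
        SubAggregation("sales_bucket_filter",
            elastic.NewBucketSelectorAggregation().
                AddBucketsPath("totalSales", "total_sales").
                Script(elastic.NewScript("params.totalSales > 200")))

    src, err := histo.Source()
    if err != nil {
        panic(err)
    }
    out, _ := json.Marshal(src)
    fmt.Println(string(out))
}
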
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go
deleted file mode 100644
index e378c2832..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestBucketSelectorAggregation(t *testing.T) {
- agg := NewBucketSelectorAggregation().
- AddBucketsPath("totalSales", "total_sales").
- Script(NewScript("totalSales >= 1000"))
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"bucket_selector":{"buckets_path":{"totalSales":"total_sales"},"script":{"source":"totalSales \u003e= 1000"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go
deleted file mode 100644
index 80a1db42d..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// CumulativeSumAggregation is a parent pipeline aggregation which calculates
-// the cumulative sum of a specified metric in a parent histogram (or date_histogram)
-// aggregation. The specified metric must be numeric and the enclosing
-// histogram must have min_doc_count set to 0 (default for histogram aggregations).
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-cumulative-sum-aggregation.html
-type CumulativeSumAggregation struct {
- format string
-
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- bucketsPaths []string
-}
-
-// NewCumulativeSumAggregation creates and initializes a new CumulativeSumAggregation.
-func NewCumulativeSumAggregation() *CumulativeSumAggregation {
- return &CumulativeSumAggregation{
- subAggregations: make(map[string]Aggregation),
- bucketsPaths: make([]string, 0),
- }
-}
-
-func (a *CumulativeSumAggregation) Format(format string) *CumulativeSumAggregation {
- a.format = format
- return a
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (a *CumulativeSumAggregation) SubAggregation(name string, subAggregation Aggregation) *CumulativeSumAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *CumulativeSumAggregation) Meta(metaData map[string]interface{}) *CumulativeSumAggregation {
- a.meta = metaData
- return a
-}
-
-// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
-func (a *CumulativeSumAggregation) BucketsPath(bucketsPaths ...string) *CumulativeSumAggregation {
- a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
- return a
-}
-
-func (a *CumulativeSumAggregation) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["cumulative_sum"] = params
-
- if a.format != "" {
- params["format"] = a.format
- }
-
- // Add buckets paths
- switch len(a.bucketsPaths) {
- case 0:
- case 1:
- params["buckets_path"] = a.bucketsPaths[0]
- default:
- params["buckets_path"] = a.bucketsPaths
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
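
A sketch of a running total over histogram buckets, assuming the package's NewDateHistogramAggregation and NewSumAggregation builders (defined elsewhere in this library) and hypothetical "date"/"price" fields.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/olivere/elastic"
)

func main() {
    // Running total of sales across the monthly buckets.
    histo := elastic.NewDateHistogramAggregation().Field("date").Interval("month").
        SubAggregation("sales", elastic.NewSumAggregation().Field("price")).
        SubAggregation("cumulative_sales",
            elastic.NewCumulativeSumAggregation().BucketsPath("sales"))

    src, err := histo.Source()
    if err != nil {
        panic(err)
    }
    out, _ := json.Marshal(src)
    fmt.Println(string(out))
}
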
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go
deleted file mode 100644
index 69a215d43..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestCumulativeSumAggregation(t *testing.T) {
- agg := NewCumulativeSumAggregation().BucketsPath("sales")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"cumulative_sum":{"buckets_path":"sales"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative.go
deleted file mode 100644
index ee7114e25..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// DerivativeAggregation is a parent pipeline aggregation which calculates
-// the derivative of a specified metric in a parent histogram (or date_histogram)
-// aggregation. The specified metric must be numeric and the enclosing
-// histogram must have min_doc_count set to 0 (default for histogram aggregations).
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-derivative-aggregation.html
-type DerivativeAggregation struct {
- format string
- gapPolicy string
- unit string
-
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- bucketsPaths []string
-}
-
-// NewDerivativeAggregation creates and initializes a new DerivativeAggregation.
-func NewDerivativeAggregation() *DerivativeAggregation {
- return &DerivativeAggregation{
- subAggregations: make(map[string]Aggregation),
- bucketsPaths: make([]string, 0),
- }
-}
-
-func (a *DerivativeAggregation) Format(format string) *DerivativeAggregation {
- a.format = format
- return a
-}
-
-// GapPolicy defines what should be done when a gap in the series is discovered.
-// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
-func (a *DerivativeAggregation) GapPolicy(gapPolicy string) *DerivativeAggregation {
- a.gapPolicy = gapPolicy
- return a
-}
-
-// GapInsertZeros inserts zeros for gaps in the series.
-func (a *DerivativeAggregation) GapInsertZeros() *DerivativeAggregation {
- a.gapPolicy = "insert_zeros"
- return a
-}
-
-// GapSkip skips gaps in the series.
-func (a *DerivativeAggregation) GapSkip() *DerivativeAggregation {
- a.gapPolicy = "skip"
- return a
-}
-
-// Unit sets the unit in which the derivative is reported, e.g. "1d" or "1y".
-// It is only useful when calculating the derivative using a date_histogram.
-func (a *DerivativeAggregation) Unit(unit string) *DerivativeAggregation {
- a.unit = unit
- return a
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (a *DerivativeAggregation) SubAggregation(name string, subAggregation Aggregation) *DerivativeAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *DerivativeAggregation) Meta(metaData map[string]interface{}) *DerivativeAggregation {
- a.meta = metaData
- return a
-}
-
-// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
-func (a *DerivativeAggregation) BucketsPath(bucketsPaths ...string) *DerivativeAggregation {
- a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
- return a
-}
-
-func (a *DerivativeAggregation) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["derivative"] = params
-
- if a.format != "" {
- params["format"] = a.format
- }
- if a.gapPolicy != "" {
- params["gap_policy"] = a.gapPolicy
- }
- if a.unit != "" {
- params["unit"] = a.unit
- }
-
- // Add buckets paths
- switch len(a.bucketsPaths) {
- case 0:
- case 1:
- params["buckets_path"] = a.bucketsPaths[0]
- default:
- params["buckets_path"] = a.bucketsPaths
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
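
A sketch of first- and second-order derivatives chained inside a date_histogram, assuming the package's NewDateHistogramAggregation and NewSumAggregation builders (defined elsewhere in this library) and hypothetical field names.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/olivere/elastic"
)

func main() {
    // Month-over-month change of sales, plus the change of that change
    // (a second-order derivative chained onto the first).
    histo := elastic.NewDateHistogramAggregation().Field("date").Interval("month").
        SubAggregation("sales", elastic.NewSumAggregation().Field("price")).
        SubAggregation("sales_deriv",
            elastic.NewDerivativeAggregation().BucketsPath("sales")).
        SubAggregation("sales_2nd_deriv",
            elastic.NewDerivativeAggregation().BucketsPath("sales_deriv"))

    src, err := histo.Source()
    if err != nil {
        panic(err)
    }
    out, _ := json.Marshal(src)
    fmt.Println(string(out))
}
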
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go
deleted file mode 100644
index 7e7b26749..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestDerivativeAggregation(t *testing.T) {
- agg := NewDerivativeAggregation().BucketsPath("sales")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"derivative":{"buckets_path":"sales"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go
deleted file mode 100644
index 5da049561..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// MaxBucketAggregation is a sibling pipeline aggregation which identifies
-// the bucket(s) with the maximum value of a specified metric in a sibling
-// aggregation and outputs both the value and the key(s) of the bucket(s).
-// The specified metric must be numeric and the sibling aggregation must
-// be a multi-bucket aggregation.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-max-bucket-aggregation.html
-type MaxBucketAggregation struct {
- format string
- gapPolicy string
-
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- bucketsPaths []string
-}
-
-// NewMaxBucketAggregation creates and initializes a new MaxBucketAggregation.
-func NewMaxBucketAggregation() *MaxBucketAggregation {
- return &MaxBucketAggregation{
- subAggregations: make(map[string]Aggregation),
- bucketsPaths: make([]string, 0),
- }
-}
-
-func (a *MaxBucketAggregation) Format(format string) *MaxBucketAggregation {
- a.format = format
- return a
-}
-
-// GapPolicy defines what should be done when a gap in the series is discovered.
-// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
-func (a *MaxBucketAggregation) GapPolicy(gapPolicy string) *MaxBucketAggregation {
- a.gapPolicy = gapPolicy
- return a
-}
-
-// GapInsertZeros inserts zeros for gaps in the series.
-func (a *MaxBucketAggregation) GapInsertZeros() *MaxBucketAggregation {
- a.gapPolicy = "insert_zeros"
- return a
-}
-
-// GapSkip skips gaps in the series.
-func (a *MaxBucketAggregation) GapSkip() *MaxBucketAggregation {
- a.gapPolicy = "skip"
- return a
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (a *MaxBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxBucketAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *MaxBucketAggregation) Meta(metaData map[string]interface{}) *MaxBucketAggregation {
- a.meta = metaData
- return a
-}
-
-// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
-func (a *MaxBucketAggregation) BucketsPath(bucketsPaths ...string) *MaxBucketAggregation {
- a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
- return a
-}
-
-func (a *MaxBucketAggregation) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["max_bucket"] = params
-
- if a.format != "" {
- params["format"] = a.format
- }
- if a.gapPolicy != "" {
- params["gap_policy"] = a.gapPolicy
- }
-
- // Add buckets paths
- switch len(a.bucketsPaths) {
- case 0:
- case 1:
- params["buckets_path"] = a.bucketsPaths[0]
- default:
- params["buckets_path"] = a.bucketsPaths
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
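
A sketch showing the request body this builder emits when a gap policy and format are set; the sibling "sales_per_month" histogram referenced by the buckets path is a hypothetical aggregation assumed to be defined alongside it.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/olivere/elastic"
)

func main() {
    // Identify the month with the largest sales sum; placed as a sibling
    // of a "sales_per_month" date_histogram that has a "sales" sub-metric.
    agg := elastic.NewMaxBucketAggregation().
        BucketsPath("sales_per_month>sales").
        GapSkip().
        Format("0.00")

    src, err := agg.Source()
    if err != nil {
        panic(err)
    }
    out, _ := json.Marshal(src)
    // {"max_bucket":{"buckets_path":"sales_per_month\u003esales","format":"0.00","gap_policy":"skip"}}
    fmt.Println(string(out))
}
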
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go
deleted file mode 100644
index aa9bf2f6d..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMaxBucketAggregation(t *testing.T) {
- agg := NewMaxBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"max_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go
deleted file mode 100644
index 463bb919e..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// MinBucketAggregation is a sibling pipeline aggregation which identifies
-// the bucket(s) with the minimum value of a specified metric in a sibling
-// aggregation and outputs both the value and the key(s) of the bucket(s).
-// The specified metric must be numeric and the sibling aggregation must
-// be a multi-bucket aggregation.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-min-bucket-aggregation.html
-type MinBucketAggregation struct {
- format string
- gapPolicy string
-
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- bucketsPaths []string
-}
-
-// NewMinBucketAggregation creates and initializes a new MinBucketAggregation.
-func NewMinBucketAggregation() *MinBucketAggregation {
- return &MinBucketAggregation{
- subAggregations: make(map[string]Aggregation),
- bucketsPaths: make([]string, 0),
- }
-}
-
-func (a *MinBucketAggregation) Format(format string) *MinBucketAggregation {
- a.format = format
- return a
-}
-
-// GapPolicy defines what should be done when a gap in the series is discovered.
-// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
-func (a *MinBucketAggregation) GapPolicy(gapPolicy string) *MinBucketAggregation {
- a.gapPolicy = gapPolicy
- return a
-}
-
-// GapInsertZeros inserts zeros for gaps in the series.
-func (a *MinBucketAggregation) GapInsertZeros() *MinBucketAggregation {
- a.gapPolicy = "insert_zeros"
- return a
-}
-
-// GapSkip skips gaps in the series.
-func (a *MinBucketAggregation) GapSkip() *MinBucketAggregation {
- a.gapPolicy = "skip"
- return a
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (a *MinBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MinBucketAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *MinBucketAggregation) Meta(metaData map[string]interface{}) *MinBucketAggregation {
- a.meta = metaData
- return a
-}
-
-// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
-func (a *MinBucketAggregation) BucketsPath(bucketsPaths ...string) *MinBucketAggregation {
- a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
- return a
-}
-
-func (a *MinBucketAggregation) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["min_bucket"] = params
-
- if a.format != "" {
- params["format"] = a.format
- }
- if a.gapPolicy != "" {
- params["gap_policy"] = a.gapPolicy
- }
-
- // Add buckets paths
- switch len(a.bucketsPaths) {
- case 0:
- case 1:
- params["buckets_path"] = a.bucketsPaths[0]
- default:
- params["buckets_path"] = a.bucketsPaths
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
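
A sketch of the full round trip: attach the sibling min_bucket to a search and read the result back. It assumes a reachable cluster plus client and response-side helpers (NewClient, Search, Aggregations.MinBucket and its Keys/Value fields) that live in parts of this library not shown here; the index and field names are hypothetical.

package main

import (
    "context"
    "fmt"

    "github.com/olivere/elastic"
)

func main() {
    ctx := context.Background()
    client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
    if err != nil {
        panic(err)
    }

    histo := elastic.NewDateHistogramAggregation().Field("date").Interval("month").
        SubAggregation("sales", elastic.NewSumAggregation().Field("price"))
    minAgg := elastic.NewMinBucketAggregation().BucketsPath("sales_per_month>sales")

    res, err := client.Search().Index("sales").Size(0).
        Aggregation("sales_per_month", histo).
        Aggregation("min_monthly_sales", minAgg).
        Do(ctx)
    if err != nil {
        panic(err)
    }
    if mb, found := res.Aggregations.MinBucket("min_monthly_sales"); found {
        // Keys holds the bucket key(s) of the cheapest month, Value the sum.
        fmt.Printf("min monthly sales: keys=%v value=%v\n", mb.Keys, mb.Value)
    }
}
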
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go
deleted file mode 100644
index ff4abf2b2..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMinBucketAggregation(t *testing.T) {
- agg := NewMinBucketAggregation().BucketsPath("sales_per_month>sales").GapPolicy("skip")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"min_bucket":{"buckets_path":"sales_per_month\u003esales","gap_policy":"skip"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go
deleted file mode 100644
index 821d73842..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// MovAvgAggregation operates on a series of data. It will slide a window
-// across the data and emit the average value of that window.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html
-type MovAvgAggregation struct {
- format string
- gapPolicy string
- model MovAvgModel
- window *int
- predict *int
- minimize *bool
-
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- bucketsPaths []string
-}
-
-// NewMovAvgAggregation creates and initializes a new MovAvgAggregation.
-func NewMovAvgAggregation() *MovAvgAggregation {
- return &MovAvgAggregation{
- subAggregations: make(map[string]Aggregation),
- bucketsPaths: make([]string, 0),
- }
-}
-
-func (a *MovAvgAggregation) Format(format string) *MovAvgAggregation {
- a.format = format
- return a
-}
-
-// GapPolicy defines what should be done when a gap in the series is discovered.
-// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
-func (a *MovAvgAggregation) GapPolicy(gapPolicy string) *MovAvgAggregation {
- a.gapPolicy = gapPolicy
- return a
-}
-
-// GapInsertZeros inserts zeros for gaps in the series.
-func (a *MovAvgAggregation) GapInsertZeros() *MovAvgAggregation {
- a.gapPolicy = "insert_zeros"
- return a
-}
-
-// GapSkip skips gaps in the series.
-func (a *MovAvgAggregation) GapSkip() *MovAvgAggregation {
- a.gapPolicy = "skip"
- return a
-}
-
-// Model is used to define what type of moving average you want to use
-// in the series.
-func (a *MovAvgAggregation) Model(model MovAvgModel) *MovAvgAggregation {
- a.model = model
- return a
-}
-
-// Window sets the window size for the moving average. This window will
-// "slide" across the series, and the values inside that window will
-// be used to calculate the moving avg value.
-func (a *MovAvgAggregation) Window(window int) *MovAvgAggregation {
- a.window = &window
- return a
-}
-
-// Predict sets the number of predictions that should be returned.
-// Each prediction will be spaced at the intervals in the histogram.
-// E.g. a predict of 2 will return two new buckets at the end of the
-// histogram with the predicted values.
-func (a *MovAvgAggregation) Predict(numPredictions int) *MovAvgAggregation {
- a.predict = &numPredictions
- return a
-}
-
-// Minimize determines if the model should be fit to the data using a
-// cost minimizing algorithm.
-func (a *MovAvgAggregation) Minimize(minimize bool) *MovAvgAggregation {
- a.minimize = &minimize
- return a
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (a *MovAvgAggregation) SubAggregation(name string, subAggregation Aggregation) *MovAvgAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *MovAvgAggregation) Meta(metaData map[string]interface{}) *MovAvgAggregation {
- a.meta = metaData
- return a
-}
-
-// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
-func (a *MovAvgAggregation) BucketsPath(bucketsPaths ...string) *MovAvgAggregation {
- a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
- return a
-}
-
-func (a *MovAvgAggregation) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["moving_avg"] = params
-
- if a.format != "" {
- params["format"] = a.format
- }
- if a.gapPolicy != "" {
- params["gap_policy"] = a.gapPolicy
- }
- if a.model != nil {
- params["model"] = a.model.Name()
- settings := a.model.Settings()
- if len(settings) > 0 {
- params["settings"] = settings
- }
- }
- if a.window != nil {
- params["window"] = *a.window
- }
- if a.predict != nil {
- params["predict"] = *a.predict
- }
- if a.minimize != nil {
- params["minimize"] = *a.minimize
- }
-
- // Add buckets paths
- switch len(a.bucketsPaths) {
- case 0:
- case 1:
- params["buckets_path"] = a.bucketsPaths[0]
- default:
- params["buckets_path"] = a.bucketsPaths
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
-
-// -- Models for moving averages --
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_models
-
-// MovAvgModel specifies the model to use with the MovAvgAggregation.
-type MovAvgModel interface {
- Name() string
- Settings() map[string]interface{}
-}
-
-// -- EWMA --
-
-// EWMAMovAvgModel calculates an exponentially weighted moving average.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_ewma_exponentially_weighted
-type EWMAMovAvgModel struct {
- alpha *float64
-}
-
-// NewEWMAMovAvgModel creates and initializes a new EWMAMovAvgModel.
-func NewEWMAMovAvgModel() *EWMAMovAvgModel {
- return &EWMAMovAvgModel{}
-}
-
-// Alpha controls the smoothing of the data. Alpha = 1 retains no memory
-// of past values (e.g. a random walk), while alpha = 0 retains infinite
-// memory of past values (e.g. the series mean). Useful values are somewhere
-// in between. Defaults to 0.5.
-func (m *EWMAMovAvgModel) Alpha(alpha float64) *EWMAMovAvgModel {
- m.alpha = &alpha
- return m
-}
-
-// Name of the model.
-func (m *EWMAMovAvgModel) Name() string {
- return "ewma"
-}
-
-// Settings of the model.
-func (m *EWMAMovAvgModel) Settings() map[string]interface{} {
- settings := make(map[string]interface{})
- if m.alpha != nil {
- settings["alpha"] = *m.alpha
- }
- return settings
-}
-
-// -- Holt linear --
-
-// HoltLinearMovAvgModel calculates a doubly exponential weighted moving average.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_holt_linear
-type HoltLinearMovAvgModel struct {
- alpha *float64
- beta *float64
-}
-
-// NewHoltLinearMovAvgModel creates and initializes a new HoltLinearMovAvgModel.
-func NewHoltLinearMovAvgModel() *HoltLinearMovAvgModel {
- return &HoltLinearMovAvgModel{}
-}
-
-// Alpha controls the smoothing of the data. Alpha = 1 retains no memory
-// of past values (e.g. a random walk), while alpha = 0 retains infinite
-// memory of past values (e.g. the series mean). Useful values are somewhere
-// in between. Defaults to 0.5.
-func (m *HoltLinearMovAvgModel) Alpha(alpha float64) *HoltLinearMovAvgModel {
- m.alpha = &alpha
- return m
-}
-
-// Beta is equivalent to Alpha but controls the smoothing of the trend
-// instead of the data.
-func (m *HoltLinearMovAvgModel) Beta(beta float64) *HoltLinearMovAvgModel {
- m.beta = &beta
- return m
-}
-
-// Name of the model.
-func (m *HoltLinearMovAvgModel) Name() string {
- return "holt"
-}
-
-// Settings of the model.
-func (m *HoltLinearMovAvgModel) Settings() map[string]interface{} {
- settings := make(map[string]interface{})
- if m.alpha != nil {
- settings["alpha"] = *m.alpha
- }
- if m.beta != nil {
- settings["beta"] = *m.beta
- }
- return settings
-}
-
-// -- Holt Winters --
-
-// HoltWintersMovAvgModel calculates a triple exponential weighted moving average.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_holt_winters
-type HoltWintersMovAvgModel struct {
- alpha *float64
- beta *float64
- gamma *float64
- period *int
- seasonalityType string
- pad *bool
-}
-
-// NewHoltWintersMovAvgModel creates and initializes a new HoltWintersMovAvgModel.
-func NewHoltWintersMovAvgModel() *HoltWintersMovAvgModel {
- return &HoltWintersMovAvgModel{}
-}
-
-// Alpha controls the smoothing of the data. Alpha = 1 retains no memory
-// of past values (e.g. a random walk), while alpha = 0 retains infinite
-// memory of past values (e.g. the series mean). Useful values are somewhere
-// in between. Defaults to 0.5.
-func (m *HoltWintersMovAvgModel) Alpha(alpha float64) *HoltWintersMovAvgModel {
- m.alpha = &alpha
- return m
-}
-
-// Beta is equivalent to Alpha but controls the smoothing of the trend
-// instead of the data.
-func (m *HoltWintersMovAvgModel) Beta(beta float64) *HoltWintersMovAvgModel {
- m.beta = &beta
- return m
-}
-
-func (m *HoltWintersMovAvgModel) Gamma(gamma float64) *HoltWintersMovAvgModel {
- m.gamma = &gamma
- return m
-}
-
-func (m *HoltWintersMovAvgModel) Period(period int) *HoltWintersMovAvgModel {
- m.period = &period
- return m
-}
-
-func (m *HoltWintersMovAvgModel) SeasonalityType(typ string) *HoltWintersMovAvgModel {
- m.seasonalityType = typ
- return m
-}
-
-func (m *HoltWintersMovAvgModel) Pad(pad bool) *HoltWintersMovAvgModel {
- m.pad = &pad
- return m
-}
-
-// Name of the model.
-func (m *HoltWintersMovAvgModel) Name() string {
- return "holt_winters"
-}
-
-// Settings of the model.
-func (m *HoltWintersMovAvgModel) Settings() map[string]interface{} {
- settings := make(map[string]interface{})
- if m.alpha != nil {
- settings["alpha"] = *m.alpha
- }
- if m.beta != nil {
- settings["beta"] = *m.beta
- }
- if m.gamma != nil {
- settings["gamma"] = *m.gamma
- }
- if m.period != nil {
- settings["period"] = *m.period
- }
- if m.pad != nil {
- settings["pad"] = *m.pad
- }
- if m.seasonalityType != "" {
- settings["type"] = m.seasonalityType
- }
- return settings
-}
-
-// -- Linear --
-
-// LinearMovAvgModel calculates a linearly weighted moving average, such
-// that older values are linearly less important. "Time" is determined
-// by position in collection.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_linear
-type LinearMovAvgModel struct {
-}
-
-// NewLinearMovAvgModel creates and initializes a new LinearMovAvgModel.
-func NewLinearMovAvgModel() *LinearMovAvgModel {
- return &LinearMovAvgModel{}
-}
-
-// Name of the model.
-func (m *LinearMovAvgModel) Name() string {
- return "linear"
-}
-
-// Settings of the model.
-func (m *LinearMovAvgModel) Settings() map[string]interface{} {
- return nil
-}
-
-// -- Simple --
-
-// SimpleMovAvgModel calculates a simple unweighted (arithmetic) moving average.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_simple
-type SimpleMovAvgModel struct {
-}
-
-// NewSimpleMovAvgModel creates and initializes a new SimpleMovAvgModel.
-func NewSimpleMovAvgModel() *SimpleMovAvgModel {
- return &SimpleMovAvgModel{}
-}
-
-// Name of the model.
-func (m *SimpleMovAvgModel) Name() string {
- return "simple"
-}
-
-// Settings of the model.
-func (m *SimpleMovAvgModel) Settings() map[string]interface{} {
- return nil
-}
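
A sketch combining the aggregation with one of the models above: a Holt linear moving average over monthly sums, with predicted buckets. The NewDateHistogramAggregation and NewSumAggregation builders come from elsewhere in this library; fields and tuning values are hypothetical.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/olivere/elastic"
)

func main() {
    // Smooth the monthly sums with a Holt linear model over a 30-bucket
    // window and ask for 10 predicted buckets beyond the series.
    histo := elastic.NewDateHistogramAggregation().Field("date").Interval("month").
        SubAggregation("the_sum", elastic.NewSumAggregation().Field("price")).
        SubAggregation("the_movavg",
            elastic.NewMovAvgAggregation().
                BucketsPath("the_sum").
                Window(30).
                Predict(10).
                Model(elastic.NewHoltLinearMovAvgModel().Alpha(0.5).Beta(0.4)))

    src, err := histo.Source()
    if err != nil {
        panic(err)
    }
    out, _ := json.Marshal(src)
    fmt.Println(string(out))
}
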
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go
deleted file mode 100644
index af2fc7c27..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMovAvgAggregation(t *testing.T) {
- agg := NewMovAvgAggregation().BucketsPath("the_sum")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"moving_avg":{"buckets_path":"the_sum"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMovAvgAggregationWithSimpleModel(t *testing.T) {
- agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewSimpleMovAvgModel())
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"moving_avg":{"buckets_path":"the_sum","model":"simple","window":30}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMovAvgAggregationWithLinearModel(t *testing.T) {
- agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewLinearMovAvgModel())
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"moving_avg":{"buckets_path":"the_sum","model":"linear","window":30}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMovAvgAggregationWithEWMAModel(t *testing.T) {
- agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewEWMAMovAvgModel().Alpha(0.5))
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"moving_avg":{"buckets_path":"the_sum","model":"ewma","settings":{"alpha":0.5},"window":30}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMovAvgAggregationWithHoltLinearModel(t *testing.T) {
- agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).
- Model(NewHoltLinearMovAvgModel().Alpha(0.5).Beta(0.4))
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"moving_avg":{"buckets_path":"the_sum","model":"holt","settings":{"alpha":0.5,"beta":0.4},"window":30}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMovAvgAggregationWithHoltWintersModel(t *testing.T) {
- agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Predict(10).Minimize(true).
- Model(NewHoltWintersMovAvgModel().Alpha(0.5).Beta(0.4).Gamma(0.3).Period(7).Pad(true))
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"moving_avg":{"buckets_path":"the_sum","minimize":true,"model":"holt_winters","predict":10,"settings":{"alpha":0.5,"beta":0.4,"gamma":0.3,"pad":true,"period":7},"window":30}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMovAvgAggregationWithSubAggs(t *testing.T) {
- agg := NewMovAvgAggregation().BucketsPath("the_sum")
- agg = agg.SubAggregation("avg_sum", NewAvgAggregation().Field("height"))
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"aggregations":{"avg_sum":{"avg":{"field":"height"}}},"moving_avg":{"buckets_path":"the_sum"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket.go
deleted file mode 100644
index 9a3556269..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// PercentilesBucketAggregation is a sibling pipeline aggregation which calculates
-// percentiles across all buckets of a specified metric in a sibling aggregation.
-// The specified metric must be numeric and the sibling aggregation must
-// be a multi-bucket aggregation.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-percentiles-bucket-aggregation.html
-type PercentilesBucketAggregation struct {
- format string
- gapPolicy string
- percents []float64
- bucketsPaths []string
-
- subAggregations map[string]Aggregation
- meta map[string]interface{}
-}
-
-// NewPercentilesBucketAggregation creates and initializes a new PercentilesBucketAggregation.
-func NewPercentilesBucketAggregation() *PercentilesBucketAggregation {
- return &PercentilesBucketAggregation{
- subAggregations: make(map[string]Aggregation),
- }
-}
-
-// Format to apply the output value of this aggregation.
-func (p *PercentilesBucketAggregation) Format(format string) *PercentilesBucketAggregation {
- p.format = format
- return p
-}
-
-// Percents to calculate percentiles for in this aggregation.
-func (p *PercentilesBucketAggregation) Percents(percents ...float64) *PercentilesBucketAggregation {
- p.percents = percents
- return p
-}
-
-// GapPolicy defines what should be done when a gap in the series is discovered.
-// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
-func (p *PercentilesBucketAggregation) GapPolicy(gapPolicy string) *PercentilesBucketAggregation {
- p.gapPolicy = gapPolicy
- return p
-}
-
-// GapInsertZeros inserts zeros for gaps in the series.
-func (p *PercentilesBucketAggregation) GapInsertZeros() *PercentilesBucketAggregation {
- p.gapPolicy = "insert_zeros"
- return p
-}
-
-// GapSkip skips gaps in the series.
-func (p *PercentilesBucketAggregation) GapSkip() *PercentilesBucketAggregation {
- p.gapPolicy = "skip"
- return p
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (p *PercentilesBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentilesBucketAggregation {
- p.subAggregations[name] = subAggregation
- return p
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (p *PercentilesBucketAggregation) Meta(metaData map[string]interface{}) *PercentilesBucketAggregation {
- p.meta = metaData
- return p
-}
-
-// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
-func (p *PercentilesBucketAggregation) BucketsPath(bucketsPaths ...string) *PercentilesBucketAggregation {
- p.bucketsPaths = append(p.bucketsPaths, bucketsPaths...)
- return p
-}
-
-func (p *PercentilesBucketAggregation) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["percentiles_bucket"] = params
-
- if p.format != "" {
- params["format"] = p.format
- }
- if p.gapPolicy != "" {
- params["gap_policy"] = p.gapPolicy
- }
-
- // Add buckets paths
- switch len(p.bucketsPaths) {
- case 0:
- case 1:
- params["buckets_path"] = p.bucketsPaths[0]
- default:
- params["buckets_path"] = p.bucketsPaths
- }
-
- // Add percents
- if len(p.percents) > 0 {
- params["percents"] = p.percents
- }
-
- // AggregationBuilder (SubAggregations)
- if len(p.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range p.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(p.meta) > 0 {
- source["meta"] = p.meta
- }
-
- return source, nil
-}
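A short sketch of how the builder above is typically configured as a sibling of a multi-bucket aggregation; the buckets_path and percent values are illustrative, and the printed JSON follows directly from the Source() method shown above:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Sibling pipeline: placed next to (not inside) the multi-bucket
	// aggregation named "sales_per_month", reading its "sales" metric.
	pct := elastic.NewPercentilesBucketAggregation().
		BucketsPath("sales_per_month>sales").
		Percents(25, 50, 75).
		GapSkip()

	src, err := pct.Source()
	if err != nil {
		panic(err)
	}
	body, _ := json.Marshal(src)
	fmt.Println(string(body))
	// {"percentiles_bucket":{"buckets_path":"sales_per_month>sales","gap_policy":"skip","percents":[25,50,75]}}
}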
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket_test.go
deleted file mode 100644
index 5fa2639de..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestPercentilesBucketAggregation(t *testing.T) {
- agg := NewPercentilesBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"percentiles_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPercentilesBucketAggregationWithPercents(t *testing.T) {
- agg := NewPercentilesBucketAggregation().BucketsPath("the_sum").Percents(0.1, 1.0, 5.0, 25, 50)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"percentiles_bucket":{"buckets_path":"the_sum","percents":[0.1,1,5,25,50]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go
deleted file mode 100644
index e13b94ea9..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// SerialDiffAggregation implements serial differencing.
-// Serial differencing is a technique where the value a fixed number of
-// periods (the lag) earlier is subtracted from each value in a time series.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-serialdiff-aggregation.html
-type SerialDiffAggregation struct {
- format string
- gapPolicy string
- lag *int
-
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- bucketsPaths []string
-}
-
-// NewSerialDiffAggregation creates and initializes a new SerialDiffAggregation.
-func NewSerialDiffAggregation() *SerialDiffAggregation {
- return &SerialDiffAggregation{
- subAggregations: make(map[string]Aggregation),
- bucketsPaths: make([]string, 0),
- }
-}
-
-func (a *SerialDiffAggregation) Format(format string) *SerialDiffAggregation {
- a.format = format
- return a
-}
-
-// GapPolicy defines what should be done when a gap in the series is discovered.
-// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
-func (a *SerialDiffAggregation) GapPolicy(gapPolicy string) *SerialDiffAggregation {
- a.gapPolicy = gapPolicy
- return a
-}
-
-// GapInsertZeros inserts zeros for gaps in the series.
-func (a *SerialDiffAggregation) GapInsertZeros() *SerialDiffAggregation {
- a.gapPolicy = "insert_zeros"
- return a
-}
-
-// GapSkip skips gaps in the series.
-func (a *SerialDiffAggregation) GapSkip() *SerialDiffAggregation {
- a.gapPolicy = "skip"
- return a
-}
-
-// Lag specifies the historical bucket to subtract from the current value.
-// E.g. a lag of 7 will subtract the value from 7 buckets ago from the
-// current value. Lag must be a positive, non-zero integer.
-func (a *SerialDiffAggregation) Lag(lag int) *SerialDiffAggregation {
- a.lag = &lag
- return a
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (a *SerialDiffAggregation) SubAggregation(name string, subAggregation Aggregation) *SerialDiffAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *SerialDiffAggregation) Meta(metaData map[string]interface{}) *SerialDiffAggregation {
- a.meta = metaData
- return a
-}
-
-// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
-func (a *SerialDiffAggregation) BucketsPath(bucketsPaths ...string) *SerialDiffAggregation {
- a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
- return a
-}
-
-func (a *SerialDiffAggregation) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["serial_diff"] = params
-
- if a.format != "" {
- params["format"] = a.format
- }
- if a.gapPolicy != "" {
- params["gap_policy"] = a.gapPolicy
- }
- if a.lag != nil {
- params["lag"] = *a.lag
- }
-
- // Add buckets paths
- switch len(a.bucketsPaths) {
- case 0:
- case 1:
- params["buckets_path"] = a.bucketsPaths[0]
- default:
- params["buckets_path"] = a.bucketsPaths
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
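The Lag semantics are easiest to see with a concrete setting: against monthly buckets, Lag(1) yields month-over-month differences and Lag(12) compares each month with the same month a year earlier. A minimal sketch; bucket and path names are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// serial_diff is a parent pipeline: it is added as a sub-aggregation of
	// the histogram whose sibling metric "sales" it differences.
	diff := elastic.NewSerialDiffAggregation().
		BucketsPath("sales").
		Lag(12).
		GapInsertZeros()

	src, err := diff.Source()
	if err != nil {
		panic(err)
	}
	body, _ := json.Marshal(src)
	fmt.Println(string(body))
	// {"serial_diff":{"buckets_path":"sales","gap_policy":"insert_zeros","lag":12}}
}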
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go
deleted file mode 100644
index 6d336a2ee..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSerialDiffAggregation(t *testing.T) {
- agg := NewSerialDiffAggregation().BucketsPath("the_sum").Lag(7)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"serial_diff":{"buckets_path":"the_sum","lag":7}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket.go
deleted file mode 100644
index e68a420f2..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// StatsBucketAggregation is a sibling pipeline aggregation which calculates
-// a variety of stats across all buckets of a specified metric in a sibling aggregation.
-// The specified metric must be numeric and the sibling aggregation must
-// be a multi-bucket aggregation.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-stats-bucket-aggregation.html
-type StatsBucketAggregation struct {
- format string
- gapPolicy string
-
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- bucketsPaths []string
-}
-
-// NewStatsBucketAggregation creates and initializes a new StatsBucketAggregation.
-func NewStatsBucketAggregation() *StatsBucketAggregation {
- return &StatsBucketAggregation{
- subAggregations: make(map[string]Aggregation),
- bucketsPaths: make([]string, 0),
- }
-}
-
-func (s *StatsBucketAggregation) Format(format string) *StatsBucketAggregation {
- s.format = format
- return s
-}
-
-// GapPolicy defines what should be done when a gap in the series is discovered.
-// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
-func (s *StatsBucketAggregation) GapPolicy(gapPolicy string) *StatsBucketAggregation {
- s.gapPolicy = gapPolicy
- return s
-}
-
-// GapInsertZeros inserts zeros for gaps in the series.
-func (s *StatsBucketAggregation) GapInsertZeros() *StatsBucketAggregation {
- s.gapPolicy = "insert_zeros"
- return s
-}
-
-// GapSkip skips gaps in the series.
-func (s *StatsBucketAggregation) GapSkip() *StatsBucketAggregation {
- s.gapPolicy = "skip"
- return s
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (s *StatsBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *StatsBucketAggregation {
- s.subAggregations[name] = subAggregation
- return s
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (s *StatsBucketAggregation) Meta(metaData map[string]interface{}) *StatsBucketAggregation {
- s.meta = metaData
- return s
-}
-
-// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
-func (s *StatsBucketAggregation) BucketsPath(bucketsPaths ...string) *StatsBucketAggregation {
- s.bucketsPaths = append(s.bucketsPaths, bucketsPaths...)
- return s
-}
-
-func (s *StatsBucketAggregation) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["stats_bucket"] = params
-
- if s.format != "" {
- params["format"] = s.format
- }
- if s.gapPolicy != "" {
- params["gap_policy"] = s.gapPolicy
- }
-
- // Add buckets paths
- switch len(s.bucketsPaths) {
- case 0:
- case 1:
- params["buckets_path"] = s.bucketsPaths[0]
- default:
- params["buckets_path"] = s.bucketsPaths
- }
-
- // AggregationBuilder (SubAggregations)
- if len(s.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range s.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(s.meta) > 0 {
- source["meta"] = s.meta
- }
-
- return source, nil
-}
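As with the other sibling pipelines in this diff, stats_bucket is placed next to the multi-bucket aggregation it reads, never inside it. A minimal sketch with an explicit format and gap policy; path and format values are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	stats := elastic.NewStatsBucketAggregation().
		BucketsPath("sales_per_month>sales"). // <multi-bucket agg>>< metric>
		Format("0.00").
		GapSkip()

	src, err := stats.Source()
	if err != nil {
		panic(err)
	}
	body, _ := json.Marshal(src)
	fmt.Println(string(body))
	// {"stats_bucket":{"buckets_path":"sales_per_month>sales","format":"0.00","gap_policy":"skip"}}
}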
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket_test.go
deleted file mode 100644
index 117a73885..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestStatsBucketAggregation(t *testing.T) {
- agg := NewStatsBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"stats_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go
deleted file mode 100644
index c22ae8f50..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// SumBucketAggregation is a sibling pipeline aggregation which calculates
-// the sum across all buckets of a specified metric in a sibling aggregation.
-// The specified metric must be numeric and the sibling aggregation must
-// be a multi-bucket aggregation.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-sum-bucket-aggregation.html
-type SumBucketAggregation struct {
- format string
- gapPolicy string
-
- subAggregations map[string]Aggregation
- meta map[string]interface{}
- bucketsPaths []string
-}
-
-// NewSumBucketAggregation creates and initializes a new SumBucketAggregation.
-func NewSumBucketAggregation() *SumBucketAggregation {
- return &SumBucketAggregation{
- subAggregations: make(map[string]Aggregation),
- bucketsPaths: make([]string, 0),
- }
-}
-
-func (a *SumBucketAggregation) Format(format string) *SumBucketAggregation {
- a.format = format
- return a
-}
-
-// GapPolicy defines what should be done when a gap in the series is discovered.
-// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
-func (a *SumBucketAggregation) GapPolicy(gapPolicy string) *SumBucketAggregation {
- a.gapPolicy = gapPolicy
- return a
-}
-
-// GapInsertZeros inserts zeros for gaps in the series.
-func (a *SumBucketAggregation) GapInsertZeros() *SumBucketAggregation {
- a.gapPolicy = "insert_zeros"
- return a
-}
-
-// GapSkip skips gaps in the series.
-func (a *SumBucketAggregation) GapSkip() *SumBucketAggregation {
- a.gapPolicy = "skip"
- return a
-}
-
-// SubAggregation adds a sub-aggregation to this aggregation.
-func (a *SumBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *SumBucketAggregation {
- a.subAggregations[name] = subAggregation
- return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *SumBucketAggregation) Meta(metaData map[string]interface{}) *SumBucketAggregation {
- a.meta = metaData
- return a
-}
-
-// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
-func (a *SumBucketAggregation) BucketsPath(bucketsPaths ...string) *SumBucketAggregation {
- a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
- return a
-}
-
-func (a *SumBucketAggregation) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["sum_bucket"] = params
-
- if a.format != "" {
- params["format"] = a.format
- }
- if a.gapPolicy != "" {
- params["gap_policy"] = a.gapPolicy
- }
-
- // Add buckets paths
- switch len(a.bucketsPaths) {
- case 0:
- case 1:
- params["buckets_path"] = a.bucketsPaths[0]
- default:
- params["buckets_path"] = a.bucketsPaths
- }
-
- // AggregationBuilder (SubAggregations)
- if len(a.subAggregations) > 0 {
- aggsMap := make(map[string]interface{})
- source["aggregations"] = aggsMap
- for name, aggregate := range a.subAggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- }
-
- // Add Meta data if available
- if len(a.meta) > 0 {
- source["meta"] = a.meta
- }
-
- return source, nil
-}
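Beyond serializing the aggregation, the usual flow is to attach it to a search and read the single value back from the response, as TestAggsIntegrationSumBucket later in this diff does. A hedged sketch of that round trip; it assumes a reachable Elasticsearch node and an index with "time" and "price" fields, and the index name "orders" is made up for the example:

package main

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

func sumMonthlySales(ctx context.Context, client *elastic.Client, index string) (float64, error) {
	// Monthly buckets with a "sales" sum inside each bucket.
	h := elastic.NewDateHistogramAggregation().Field("time").Interval("month").
		SubAggregation("sales", elastic.NewSumAggregation().Field("price"))

	res, err := client.Search().
		Index(index).
		Query(elastic.NewMatchAllQuery()).
		Aggregation("sales_per_month", h).
		Aggregation("sum_monthly_sales",
			elastic.NewSumBucketAggregation().BucketsPath("sales_per_month>sales")).
		Size(0). // only the aggregations are of interest
		Do(ctx)
	if err != nil {
		return 0, err
	}

	// Sibling pipelines are read from the top-level Aggregations of the response.
	agg, found := res.Aggregations.SumBucket("sum_monthly_sales")
	if !found || agg.Value == nil {
		return 0, fmt.Errorf("sum_monthly_sales missing from response")
	}
	return *agg.Value, nil
}

func main() {
	client, err := elastic.NewClient() // defaults to http://127.0.0.1:9200
	if err != nil {
		panic(err)
	}
	total, err := sumMonthlySales(context.Background(), client, "orders")
	if err != nil {
		panic(err)
	}
	fmt.Println("sum of monthly sales:", total)
}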
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go
deleted file mode 100644
index be8275c81..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSumBucketAggregation(t *testing.T) {
- agg := NewSumBucketAggregation().BucketsPath("the_sum")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"sum_bucket":{"buckets_path":"the_sum"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_test.go
deleted file mode 100644
index 24dd4eb0f..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_pipeline_test.go
+++ /dev/null
@@ -1,903 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestAggsIntegrationAvgBucket(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- builder := client.Search().
- Index(testOrderIndex).
- Type("doc").
- Query(NewMatchAllQuery()).
- Pretty(true)
- h := NewDateHistogramAggregation().Field("time").Interval("month")
- h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
- builder = builder.Aggregation("sales_per_month", h)
- builder = builder.Aggregation("avg_monthly_sales", NewAvgBucketAggregation().BucketsPath("sales_per_month>sales"))
-
- res, err := builder.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Hits == nil {
- t.Errorf("expected Hits != nil; got: nil")
- }
-
- aggs := res.Aggregations
- if aggs == nil {
- t.Fatal("expected aggregations != nil; got: nil")
- }
-
- agg, found := aggs.AvgBucket("avg_monthly_sales")
- if !found {
- t.Fatal("expected avg_monthly_sales aggregation")
- }
- if agg == nil {
- t.Fatal("expected avg_monthly_sales aggregation")
- }
- if agg.Value == nil {
- t.Fatal("expected avg_monthly_sales.value != nil")
- }
- if got, want := *agg.Value, float64(939.2); got != want {
- t.Fatalf("expected avg_monthly_sales.value=%v; got: %v", want, got)
- }
-}
-
-func TestAggsIntegrationDerivative(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- builder := client.Search().
- Index(testOrderIndex).
- Type("doc").
- Query(NewMatchAllQuery()).
- Pretty(true)
- h := NewDateHistogramAggregation().Field("time").Interval("month")
- h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
- h = h.SubAggregation("sales_deriv", NewDerivativeAggregation().BucketsPath("sales"))
- builder = builder.Aggregation("sales_per_month", h)
-
- res, err := builder.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Hits == nil {
- t.Errorf("expected Hits != nil; got: nil")
- }
-
- aggs := res.Aggregations
- if aggs == nil {
- t.Fatal("expected aggregations != nil; got: nil")
- }
-
- agg, found := aggs.DateHistogram("sales_per_month")
- if !found {
- t.Fatal("expected sales_per_month aggregation")
- }
- if agg == nil {
- t.Fatal("expected sales_per_month aggregation")
- }
- if got, want := len(agg.Buckets), 6; got != want {
- t.Fatalf("expected %d buckets; got: %d", want, got)
- }
-
- if got, want := agg.Buckets[0].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[1].DocCount, int64(0); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[2].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[3].DocCount, int64(3); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[4].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[5].DocCount, int64(2); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
-
- d, found := agg.Buckets[0].Derivative("sales_deriv")
- if found {
- t.Fatal("expected no sales_deriv aggregation")
- }
- if d != nil {
- t.Fatal("expected no sales_deriv aggregation")
- }
-
- d, found = agg.Buckets[1].Derivative("sales_deriv")
- if !found {
- t.Fatal("expected sales_deriv aggregation")
- }
- if d == nil {
- t.Fatal("expected sales_deriv aggregation")
- }
- if d.Value != nil {
- t.Fatal("expected sales_deriv value == nil")
- }
-
- d, found = agg.Buckets[2].Derivative("sales_deriv")
- if !found {
- t.Fatal("expected sales_deriv aggregation")
- }
- if d == nil {
- t.Fatal("expected sales_deriv aggregation")
- }
- if d.Value != nil {
- t.Fatal("expected sales_deriv value == nil")
- }
-
- d, found = agg.Buckets[3].Derivative("sales_deriv")
- if !found {
- t.Fatal("expected sales_deriv aggregation")
- }
- if d == nil {
- t.Fatal("expected sales_deriv aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected sales_deriv value != nil")
- }
- if got, want := *d.Value, float64(2348.0); got != want {
- t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[4].Derivative("sales_deriv")
- if !found {
- t.Fatal("expected sales_deriv aggregation")
- }
- if d == nil {
- t.Fatal("expected sales_deriv aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected sales_deriv value != nil")
- }
- if got, want := *d.Value, float64(-1658.0); got != want {
- t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[5].Derivative("sales_deriv")
- if !found {
- t.Fatal("expected sales_deriv aggregation")
- }
- if d == nil {
- t.Fatal("expected sales_deriv aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected sales_deriv value != nil")
- }
- if got, want := *d.Value, float64(-722.0); got != want {
- t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got)
- }
-}
-
-func TestAggsIntegrationMaxBucket(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- builder := client.Search().
- Index(testOrderIndex).
- Type("doc").
- Query(NewMatchAllQuery()).
- Pretty(true)
- h := NewDateHistogramAggregation().Field("time").Interval("month")
- h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
- builder = builder.Aggregation("sales_per_month", h)
- builder = builder.Aggregation("max_monthly_sales", NewMaxBucketAggregation().BucketsPath("sales_per_month>sales"))
-
- res, err := builder.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Hits == nil {
- t.Errorf("expected Hits != nil; got: nil")
- }
-
- aggs := res.Aggregations
- if aggs == nil {
- t.Fatal("expected aggregations != nil; got: nil")
- }
-
- agg, found := aggs.MaxBucket("max_monthly_sales")
- if !found {
- t.Fatal("expected max_monthly_sales aggregation")
- }
- if agg == nil {
- t.Fatal("expected max_monthly_sales aggregation")
- }
- if got, want := len(agg.Keys), 1; got != want {
- t.Fatalf("expected len(max_monthly_sales.keys)=%d; got: %d", want, got)
- }
- if got, want := agg.Keys[0], "2015-04-01"; got != want {
- t.Fatalf("expected max_monthly_sales.keys[0]=%v; got: %v", want, got)
- }
- if agg.Value == nil {
- t.Fatal("expected max_monthly_sales.value != nil")
- }
- if got, want := *agg.Value, float64(2448); got != want {
- t.Fatalf("expected max_monthly_sales.value=%v; got: %v", want, got)
- }
-}
-
-func TestAggsIntegrationMinBucket(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- builder := client.Search().
- Index(testOrderIndex).
- Type("doc").
- Query(NewMatchAllQuery()).
- Pretty(true)
- h := NewDateHistogramAggregation().Field("time").Interval("month")
- h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
- builder = builder.Aggregation("sales_per_month", h)
- builder = builder.Aggregation("min_monthly_sales", NewMinBucketAggregation().BucketsPath("sales_per_month>sales"))
-
- res, err := builder.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Hits == nil {
- t.Errorf("expected Hits != nil; got: nil")
- }
-
- aggs := res.Aggregations
- if aggs == nil {
- t.Fatal("expected aggregations != nil; got: nil")
- }
-
- agg, found := aggs.MinBucket("min_monthly_sales")
- if !found {
- t.Fatal("expected min_monthly_sales aggregation")
- }
- if agg == nil {
- t.Fatal("expected min_monthly_sales aggregation")
- }
- if got, want := len(agg.Keys), 1; got != want {
- t.Fatalf("expected len(min_monthly_sales.keys)=%d; got: %d", want, got)
- }
- if got, want := agg.Keys[0], "2015-06-01"; got != want {
- t.Fatalf("expected min_monthly_sales.keys[0]=%v; got: %v", want, got)
- }
- if agg.Value == nil {
- t.Fatal("expected min_monthly_sales.value != nil")
- }
- if got, want := *agg.Value, float64(68); got != want {
- t.Fatalf("expected min_monthly_sales.value=%v; got: %v", want, got)
- }
-}
-
-func TestAggsIntegrationSumBucket(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- builder := client.Search().
- Index(testOrderIndex).
- Type("doc").
- Query(NewMatchAllQuery()).
- Pretty(true)
- h := NewDateHistogramAggregation().Field("time").Interval("month")
- h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
- builder = builder.Aggregation("sales_per_month", h)
- builder = builder.Aggregation("sum_monthly_sales", NewSumBucketAggregation().BucketsPath("sales_per_month>sales"))
-
- res, err := builder.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Hits == nil {
- t.Errorf("expected Hits != nil; got: nil")
- }
-
- aggs := res.Aggregations
- if aggs == nil {
- t.Fatal("expected aggregations != nil; got: nil")
- }
-
- agg, found := aggs.SumBucket("sum_monthly_sales")
- if !found {
- t.Fatal("expected sum_monthly_sales aggregation")
- }
- if agg == nil {
- t.Fatal("expected sum_monthly_sales aggregation")
- }
- if agg.Value == nil {
- t.Fatal("expected sum_monthly_sales.value != nil")
- }
- if got, want := *agg.Value, float64(4696.0); got != want {
- t.Fatalf("expected sum_monthly_sales.value=%v; got: %v", want, got)
- }
-}
-
-func TestAggsIntegrationMovAvg(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- builder := client.Search().
- Index(testOrderIndex).
- Type("doc").
- Query(NewMatchAllQuery()).
- Pretty(true)
- h := NewDateHistogramAggregation().Field("time").Interval("month")
- h = h.SubAggregation("the_sum", NewSumAggregation().Field("price"))
- h = h.SubAggregation("the_movavg", NewMovAvgAggregation().BucketsPath("the_sum"))
- builder = builder.Aggregation("my_date_histo", h)
-
- res, err := builder.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Hits == nil {
- t.Errorf("expected Hits != nil; got: nil")
- }
-
- aggs := res.Aggregations
- if aggs == nil {
- t.Fatal("expected aggregations != nil; got: nil")
- }
-
- agg, found := aggs.DateHistogram("my_date_histo")
- if !found {
-		t.Fatal("expected my_date_histo aggregation")
- }
- if agg == nil {
-		t.Fatal("expected my_date_histo aggregation")
- }
- if got, want := len(agg.Buckets), 6; got != want {
- t.Fatalf("expected %d buckets; got: %d", want, got)
- }
-
- d, found := agg.Buckets[0].MovAvg("the_movavg")
- if found {
- t.Fatal("expected no the_movavg aggregation")
- }
- if d != nil {
- t.Fatal("expected no the_movavg aggregation")
- }
-
- d, found = agg.Buckets[1].MovAvg("the_movavg")
- if found {
- t.Fatal("expected no the_movavg aggregation")
- }
- if d != nil {
- t.Fatal("expected no the_movavg aggregation")
- }
-
- d, found = agg.Buckets[2].MovAvg("the_movavg")
- if !found {
- t.Fatal("expected the_movavg aggregation")
- }
- if d == nil {
- t.Fatal("expected the_movavg aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected the_movavg value")
- }
- if got, want := *d.Value, float64(1290.0); got != want {
-		t.Fatalf("expected the_movavg.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[3].MovAvg("the_movavg")
- if !found {
- t.Fatal("expected the_movavg aggregation")
- }
- if d == nil {
- t.Fatal("expected the_movavg aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected the_movavg value")
- }
- if got, want := *d.Value, float64(695.0); got != want {
-		t.Fatalf("expected the_movavg.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[4].MovAvg("the_movavg")
- if !found {
- t.Fatal("expected the_movavg aggregation")
- }
- if d == nil {
- t.Fatal("expected the_movavg aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected the_movavg value")
- }
- if got, want := *d.Value, float64(1279.3333333333333); got != want {
-		t.Fatalf("expected the_movavg.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[5].MovAvg("the_movavg")
- if !found {
- t.Fatal("expected the_movavg aggregation")
- }
- if d == nil {
- t.Fatal("expected the_movavg aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected the_movavg value")
- }
- if got, want := *d.Value, float64(1157.0); got != want {
-		t.Fatalf("expected the_movavg.value=%v; got: %v", want, got)
- }
-}
-
-func TestAggsIntegrationCumulativeSum(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- builder := client.Search().
- Index(testOrderIndex).
- Type("doc").
- Query(NewMatchAllQuery()).
- Pretty(true)
- h := NewDateHistogramAggregation().Field("time").Interval("month")
- h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
- h = h.SubAggregation("cumulative_sales", NewCumulativeSumAggregation().BucketsPath("sales"))
- builder = builder.Aggregation("sales_per_month", h)
-
- res, err := builder.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Hits == nil {
- t.Errorf("expected Hits != nil; got: nil")
- }
-
- aggs := res.Aggregations
- if aggs == nil {
- t.Fatal("expected aggregations != nil; got: nil")
- }
-
- agg, found := aggs.DateHistogram("sales_per_month")
- if !found {
- t.Fatal("expected sales_per_month aggregation")
- }
- if agg == nil {
- t.Fatal("expected sales_per_month aggregation")
- }
- if got, want := len(agg.Buckets), 6; got != want {
- t.Fatalf("expected %d buckets; got: %d", want, got)
- }
-
- if got, want := agg.Buckets[0].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[1].DocCount, int64(0); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[2].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[3].DocCount, int64(3); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[4].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[5].DocCount, int64(2); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
-
- d, found := agg.Buckets[0].CumulativeSum("cumulative_sales")
- if !found {
- t.Fatal("expected cumulative_sales aggregation")
- }
- if d == nil {
- t.Fatal("expected cumulative_sales aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected cumulative_sales value != nil")
- }
- if got, want := *d.Value, float64(1290.0); got != want {
- t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[1].CumulativeSum("cumulative_sales")
- if !found {
- t.Fatal("expected cumulative_sales aggregation")
- }
- if d == nil {
- t.Fatal("expected cumulative_sales aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected cumulative_sales value != nil")
- }
- if got, want := *d.Value, float64(1290.0); got != want {
- t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[2].CumulativeSum("cumulative_sales")
- if !found {
- t.Fatal("expected cumulative_sales aggregation")
- }
- if d == nil {
- t.Fatal("expected cumulative_sales aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected cumulative_sales value != nil")
- }
- if got, want := *d.Value, float64(1390.0); got != want {
- t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[3].CumulativeSum("cumulative_sales")
- if !found {
- t.Fatal("expected cumulative_sales aggregation")
- }
- if d == nil {
- t.Fatal("expected cumulative_sales aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected cumulative_sales value != nil")
- }
- if got, want := *d.Value, float64(3838.0); got != want {
- t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[4].CumulativeSum("cumulative_sales")
- if !found {
- t.Fatal("expected cumulative_sales aggregation")
- }
- if d == nil {
- t.Fatal("expected cumulative_sales aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected cumulative_sales value != nil")
- }
- if got, want := *d.Value, float64(4628.0); got != want {
- t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[5].CumulativeSum("cumulative_sales")
- if !found {
- t.Fatal("expected cumulative_sales aggregation")
- }
- if d == nil {
- t.Fatal("expected cumulative_sales aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected cumulative_sales value != nil")
- }
- if got, want := *d.Value, float64(4696.0); got != want {
- t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got)
- }
-}
-
-func TestAggsIntegrationBucketScript(t *testing.T) {
- // client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- builder := client.Search().
- Index(testOrderIndex).
- Type("doc").
- Query(NewMatchAllQuery()).
- Pretty(true)
- h := NewDateHistogramAggregation().Field("time").Interval("month")
- h = h.SubAggregation("total_sales", NewSumAggregation().Field("price"))
- appleFilter := NewFilterAggregation().Filter(NewTermQuery("manufacturer", "Apple"))
- appleFilter = appleFilter.SubAggregation("sales", NewSumAggregation().Field("price"))
- h = h.SubAggregation("apple_sales", appleFilter)
- h = h.SubAggregation("apple_percentage",
- NewBucketScriptAggregation().
- GapPolicy("insert_zeros").
- AddBucketsPath("appleSales", "apple_sales>sales").
- AddBucketsPath("totalSales", "total_sales").
- Script(NewScript("params.appleSales / params.totalSales * 100")))
- builder = builder.Aggregation("sales_per_month", h)
-
- res, err := builder.Pretty(true).Do(context.TODO())
- if err != nil {
- t.Fatalf("%v (maybe scripting is disabled?)", err)
- }
- if res.Hits == nil {
- t.Errorf("expected Hits != nil; got: nil")
- }
-
- aggs := res.Aggregations
- if aggs == nil {
- t.Fatal("expected aggregations != nil; got: nil")
- }
-
- agg, found := aggs.DateHistogram("sales_per_month")
- if !found {
- t.Fatal("expected sales_per_month aggregation")
- }
- if agg == nil {
- t.Fatal("expected sales_per_month aggregation")
- }
- if got, want := len(agg.Buckets), 6; got != want {
- t.Fatalf("expected %d buckets; got: %d", want, got)
- }
-
- if got, want := agg.Buckets[0].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[1].DocCount, int64(0); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[2].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[3].DocCount, int64(3); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[4].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[5].DocCount, int64(2); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
-
- d, found := agg.Buckets[0].BucketScript("apple_percentage")
- if !found {
- t.Fatal("expected apple_percentage aggregation")
- }
- if d == nil {
- t.Fatal("expected apple_percentage aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected apple_percentage value != nil")
- }
- if got, want := *d.Value, float64(100.0); got != want {
- t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[1].BucketScript("apple_percentage")
- if !found {
- t.Fatal("expected apple_percentage aggregation")
- }
- if d == nil {
- t.Fatal("expected apple_percentage aggregation")
- }
- if d.Value != nil {
- t.Fatal("expected apple_percentage value == nil")
- }
-
- d, found = agg.Buckets[2].BucketScript("apple_percentage")
- if !found {
- t.Fatal("expected apple_percentage aggregation")
- }
- if d == nil {
- t.Fatal("expected apple_percentage aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected apple_percentage value != nil")
- }
- if got, want := *d.Value, float64(0.0); got != want {
- t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[3].BucketScript("apple_percentage")
- if !found {
- t.Fatal("expected apple_percentage aggregation")
- }
- if d == nil {
- t.Fatal("expected apple_percentage aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected apple_percentage value != nil")
- }
- if got, want := *d.Value, float64(34.64052287581699); got != want {
- t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[4].BucketScript("apple_percentage")
- if !found {
- t.Fatal("expected apple_percentage aggregation")
- }
- if d == nil {
- t.Fatal("expected apple_percentage aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected apple_percentage value != nil")
- }
- if got, want := *d.Value, float64(0.0); got != want {
- t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[5].BucketScript("apple_percentage")
- if !found {
- t.Fatal("expected apple_percentage aggregation")
- }
- if d == nil {
- t.Fatal("expected apple_percentage aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected apple_percentage value != nil")
- }
- if got, want := *d.Value, float64(0.0); got != want {
- t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got)
- }
-}
-
-func TestAggsIntegrationBucketSelector(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- builder := client.Search().
- Index(testOrderIndex).
- Type("doc").
- Query(NewMatchAllQuery()).
- Pretty(true)
- h := NewDateHistogramAggregation().Field("time").Interval("month")
- h = h.SubAggregation("total_sales", NewSumAggregation().Field("price"))
- h = h.SubAggregation("sales_bucket_filter",
- NewBucketSelectorAggregation().
- AddBucketsPath("totalSales", "total_sales").
- Script(NewScript("params.totalSales <= 100")))
- builder = builder.Aggregation("sales_per_month", h)
-
- res, err := builder.Do(context.TODO())
- if err != nil {
- t.Fatalf("%v (maybe scripting is disabled?)", err)
- }
- if res.Hits == nil {
- t.Errorf("expected Hits != nil; got: nil")
- }
-
- aggs := res.Aggregations
- if aggs == nil {
- t.Fatal("expected aggregations != nil; got: nil")
- }
-
- agg, found := aggs.DateHistogram("sales_per_month")
- if !found {
- t.Fatal("expected sales_per_month aggregation")
- }
- if agg == nil {
- t.Fatal("expected sales_per_month aggregation")
- }
- if got, want := len(agg.Buckets), 2; got != want {
- t.Fatalf("expected %d buckets; got: %d", want, got)
- }
-
- if got, want := agg.Buckets[0].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[1].DocCount, int64(2); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
-}
-
-func TestAggsIntegrationSerialDiff(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- builder := client.Search().
- Index(testOrderIndex).
- Type("doc").
- Query(NewMatchAllQuery()).
- Pretty(true)
- h := NewDateHistogramAggregation().Field("time").Interval("month")
- h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
- h = h.SubAggregation("the_diff", NewSerialDiffAggregation().BucketsPath("sales").Lag(1))
- builder = builder.Aggregation("sales_per_month", h)
-
- res, err := builder.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Hits == nil {
- t.Errorf("expected Hits != nil; got: nil")
- }
-
- aggs := res.Aggregations
- if aggs == nil {
- t.Fatal("expected aggregations != nil; got: nil")
- }
-
- agg, found := aggs.DateHistogram("sales_per_month")
- if !found {
- t.Fatal("expected sales_per_month aggregation")
- }
- if agg == nil {
- t.Fatal("expected sales_per_month aggregation")
- }
- if got, want := len(agg.Buckets), 6; got != want {
- t.Fatalf("expected %d buckets; got: %d", want, got)
- }
-
- if got, want := agg.Buckets[0].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[1].DocCount, int64(0); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[2].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[3].DocCount, int64(3); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[4].DocCount, int64(1); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
- if got, want := agg.Buckets[5].DocCount, int64(2); got != want {
- t.Fatalf("expected DocCount=%d; got: %d", want, got)
- }
-
- d, found := agg.Buckets[0].SerialDiff("the_diff")
- if found {
- t.Fatal("expected no the_diff aggregation")
- }
- if d != nil {
- t.Fatal("expected no the_diff aggregation")
- }
-
- d, found = agg.Buckets[1].SerialDiff("the_diff")
- if found {
- t.Fatal("expected no the_diff aggregation")
- }
- if d != nil {
- t.Fatal("expected no the_diff aggregation")
- }
-
- d, found = agg.Buckets[2].SerialDiff("the_diff")
- if found {
- t.Fatal("expected no the_diff aggregation")
- }
- if d != nil {
- t.Fatal("expected no the_diff aggregation")
- }
-
- d, found = agg.Buckets[3].SerialDiff("the_diff")
- if !found {
- t.Fatal("expected the_diff aggregation")
- }
- if d == nil {
- t.Fatal("expected the_diff aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected the_diff value != nil")
- }
- if got, want := *d.Value, float64(2348.0); got != want {
- t.Fatalf("expected the_diff.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[4].SerialDiff("the_diff")
- if !found {
- t.Fatal("expected the_diff aggregation")
- }
- if d == nil {
- t.Fatal("expected the_diff aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected the_diff value != nil")
- }
- if got, want := *d.Value, float64(-1658.0); got != want {
- t.Fatalf("expected the_diff.value=%v; got: %v", want, got)
- }
-
- d, found = agg.Buckets[5].SerialDiff("the_diff")
- if !found {
- t.Fatal("expected the_diff aggregation")
- }
- if d == nil {
- t.Fatal("expected the_diff aggregation")
- }
- if d.Value == nil {
- t.Fatal("expected the_diff value != nil")
- }
- if got, want := *d.Value, float64(-722.0); got != want {
- t.Fatalf("expected the_diff.value=%v; got: %v", want, got)
- }
-}
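The integration tests above all follow one of two patterns: sibling pipelines (avg_bucket, min_bucket, max_bucket, sum_bucket) are read from the top-level Aggregations of the response, while parent pipelines (derivative, moving_avg, cumulative_sum, serial_diff) are read per bucket of their enclosing histogram. A reduced sketch of the per-bucket case; it assumes a reachable Elasticsearch node, and the index and field names are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient() // defaults to http://127.0.0.1:9200
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Monthly sales plus their month-over-month derivative.
	h := elastic.NewDateHistogramAggregation().Field("time").Interval("month").
		SubAggregation("sales", elastic.NewSumAggregation().Field("price")).
		SubAggregation("sales_deriv", elastic.NewDerivativeAggregation().BucketsPath("sales"))

	res, err := client.Search().
		Index("orders"). // illustrative index name
		Query(elastic.NewMatchAllQuery()).
		Aggregation("sales_per_month", h).
		Size(0).
		Do(ctx)
	if err != nil {
		panic(err)
	}

	histo, found := res.Aggregations.DateHistogram("sales_per_month")
	if !found {
		panic("sales_per_month missing from response")
	}
	for _, bucket := range histo.Buckets {
		// The first bucket has no predecessor, so its derivative is absent;
		// that is why the tests above expect (nil, false) for bucket 0.
		if d, ok := bucket.Derivative("sales_deriv"); ok && d.Value != nil {
			fmt.Printf("bucket %v: %d docs, delta %v\n", bucket.Key, bucket.DocCount, *d.Value)
		}
	}
}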
diff --git a/vendor/github.com/olivere/elastic/search_aggs_test.go b/vendor/github.com/olivere/elastic/search_aggs_test.go
deleted file mode 100644
index f1b6347b3..000000000
--- a/vendor/github.com/olivere/elastic/search_aggs_test.go
+++ /dev/null
@@ -1,3416 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "strings"
- "testing"
- "time"
-)
-
-func TestAggs(t *testing.T) {
- //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndex(t)
-
- /*
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
- */
-
- tweet1 := tweet{
- User: "olivere",
- Retweets: 108,
- Message: "Welcome to Golang and Elasticsearch.",
- Image: "http://golang.org/doc/gopher/gophercolor.png",
- Tags: []string{"golang", "elasticsearch"},
- Location: "48.1333,11.5667", // lat,lon
- Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
- }
- tweet2 := tweet{
- User: "olivere",
- Retweets: 0,
- Message: "Another unrelated topic.",
- Tags: []string{"golang"},
- Location: "48.1189,11.4289", // lat,lon
- Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
- }
- tweet3 := tweet{
- User: "sandrae",
- Retweets: 12,
- Message: "Cycling is fun.",
- Tags: []string{"sports", "cycling"},
- Location: "47.7167,11.7167", // lat,lon
- Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Match all should return all documents
- all := NewMatchAllQuery()
-
- // Terms Aggregate by user name
- globalAgg := NewGlobalAggregation()
- usersAgg := NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
- retweetsAgg := NewTermsAggregation().Field("retweets").Size(10)
- avgRetweetsAgg := NewAvgAggregation().Field("retweets")
- avgRetweetsWithMetaAgg := NewAvgAggregation().Field("retweetsMeta").Meta(map[string]interface{}{"meta": true})
- minRetweetsAgg := NewMinAggregation().Field("retweets")
- maxRetweetsAgg := NewMaxAggregation().Field("retweets")
- sumRetweetsAgg := NewSumAggregation().Field("retweets")
- statsRetweetsAgg := NewStatsAggregation().Field("retweets")
- extstatsRetweetsAgg := NewExtendedStatsAggregation().Field("retweets")
- valueCountRetweetsAgg := NewValueCountAggregation().Field("retweets")
- percentilesRetweetsAgg := NewPercentilesAggregation().Field("retweets")
- percentileRanksRetweetsAgg := NewPercentileRanksAggregation().Field("retweets").Values(25, 50, 75)
- cardinalityAgg := NewCardinalityAggregation().Field("user")
- significantTermsAgg := NewSignificantTermsAggregation().Field("message")
- samplerAgg := NewSamplerAggregation().SubAggregation("tagged_with", NewTermsAggregation().Field("tags"))
- retweetsRangeAgg := NewRangeAggregation().Field("retweets").Lt(10).Between(10, 100).Gt(100)
- retweetsKeyedRangeAgg := NewRangeAggregation().Field("retweets").Keyed(true).Lt(10).Between(10, 100).Gt(100)
- dateRangeAgg := NewDateRangeAggregation().Field("created").Lt("2012-01-01").Between("2012-01-01", "2013-01-01").Gt("2013-01-01")
- missingTagsAgg := NewMissingAggregation().Field("tags")
- retweetsHistoAgg := NewHistogramAggregation().Field("retweets").Interval(100)
- dateHistoAgg := NewDateHistogramAggregation().Field("created").Interval("year")
- retweetsFilterAgg := NewFilterAggregation().Filter(
- NewRangeQuery("created").Gte("2012-01-01").Lte("2012-12-31")).
- SubAggregation("avgRetweetsSub", NewAvgAggregation().Field("retweets"))
- queryFilterAgg := NewFilterAggregation().Filter(NewTermQuery("tags", "golang"))
- topTagsHitsAgg := NewTopHitsAggregation().Sort("created", false).Size(5).FetchSource(true)
- topTagsAgg := NewTermsAggregation().Field("tags").Size(3).SubAggregation("top_tag_hits", topTagsHitsAgg)
- geoBoundsAgg := NewGeoBoundsAggregation().Field("location")
- geoHashAgg := NewGeoHashGridAggregation().Field("location").Precision(5)
- composite := NewCompositeAggregation().Sources(
- NewCompositeAggregationTermsValuesSource("composite_users").Field("user"),
- NewCompositeAggregationHistogramValuesSource("composite_retweets", 1).Field("retweets"),
- NewCompositeAggregationDateHistogramValuesSource("composite_created", "1m").Field("created"),
- )
-
- // Run query
- builder := client.Search().Index(testIndexName).Query(all).Pretty(true)
- builder = builder.Aggregation("global", globalAgg)
- builder = builder.Aggregation("users", usersAgg)
- builder = builder.Aggregation("retweets", retweetsAgg)
- builder = builder.Aggregation("avgRetweets", avgRetweetsAgg)
- builder = builder.Aggregation("avgRetweetsWithMeta", avgRetweetsWithMetaAgg)
- builder = builder.Aggregation("minRetweets", minRetweetsAgg)
- builder = builder.Aggregation("maxRetweets", maxRetweetsAgg)
- builder = builder.Aggregation("sumRetweets", sumRetweetsAgg)
- builder = builder.Aggregation("statsRetweets", statsRetweetsAgg)
- builder = builder.Aggregation("extstatsRetweets", extstatsRetweetsAgg)
- builder = builder.Aggregation("valueCountRetweets", valueCountRetweetsAgg)
- builder = builder.Aggregation("percentilesRetweets", percentilesRetweetsAgg)
- builder = builder.Aggregation("percentileRanksRetweets", percentileRanksRetweetsAgg)
- builder = builder.Aggregation("usersCardinality", cardinalityAgg)
- builder = builder.Aggregation("significantTerms", significantTermsAgg)
- builder = builder.Aggregation("sample", samplerAgg)
- builder = builder.Aggregation("retweetsRange", retweetsRangeAgg)
- builder = builder.Aggregation("retweetsKeyedRange", retweetsKeyedRangeAgg)
- builder = builder.Aggregation("dateRange", dateRangeAgg)
- builder = builder.Aggregation("missingTags", missingTagsAgg)
- builder = builder.Aggregation("retweetsHisto", retweetsHistoAgg)
- builder = builder.Aggregation("dateHisto", dateHistoAgg)
- builder = builder.Aggregation("retweetsFilter", retweetsFilterAgg)
- builder = builder.Aggregation("queryFilter", queryFilterAgg)
- builder = builder.Aggregation("top-tags", topTagsAgg)
- builder = builder.Aggregation("viewport", geoBoundsAgg)
- builder = builder.Aggregation("geohashed", geoHashAgg)
- // Unnamed filters
- countByUserAgg := NewFiltersAggregation().
- Filters(NewTermQuery("user", "olivere"), NewTermQuery("user", "sandrae"))
- builder = builder.Aggregation("countByUser", countByUserAgg)
- // Named filters
- countByUserAgg2 := NewFiltersAggregation().
- FilterWithName("olivere", NewTermQuery("user", "olivere")).
- FilterWithName("sandrae", NewTermQuery("user", "sandrae"))
- builder = builder.Aggregation("countByUser2", countByUserAgg2)
- // AvgBucket
- dateHisto := NewDateHistogramAggregation().Field("created").Interval("year")
- dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
- builder = builder.Aggregation("avgBucketDateHisto", dateHisto)
- builder = builder.Aggregation("avgSumOfRetweets", NewAvgBucketAggregation().BucketsPath("avgBucketDateHisto>sumOfRetweets"))
- // MinBucket
- dateHisto = NewDateHistogramAggregation().Field("created").Interval("year")
- dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
- builder = builder.Aggregation("minBucketDateHisto", dateHisto)
- builder = builder.Aggregation("minBucketSumOfRetweets", NewMinBucketAggregation().BucketsPath("minBucketDateHisto>sumOfRetweets"))
- // MaxBucket
- dateHisto = NewDateHistogramAggregation().Field("created").Interval("year")
- dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
- builder = builder.Aggregation("maxBucketDateHisto", dateHisto)
- builder = builder.Aggregation("maxBucketSumOfRetweets", NewMaxBucketAggregation().BucketsPath("maxBucketDateHisto>sumOfRetweets"))
- // SumBucket
- dateHisto = NewDateHistogramAggregation().Field("created").Interval("year")
- dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
- builder = builder.Aggregation("sumBucketDateHisto", dateHisto)
- builder = builder.Aggregation("sumBucketSumOfRetweets", NewSumBucketAggregation().BucketsPath("sumBucketDateHisto>sumOfRetweets"))
- // MovAvg
- dateHisto = NewDateHistogramAggregation().Field("created").Interval("year")
- dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
- dateHisto = dateHisto.SubAggregation("movingAvg", NewMovAvgAggregation().BucketsPath("sumOfRetweets"))
- builder = builder.Aggregation("movingAvgDateHisto", dateHisto)
- builder = builder.Aggregation("composite", composite)
- searchResult, err := builder.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected Hits != nil; got: nil")
- }
- if searchResult.Hits.TotalHits != 3 {
- t.Errorf("expected Hits.TotalHits = %d; got: %d", 3, searchResult.Hits.TotalHits)
- }
- if len(searchResult.Hits.Hits) != 3 {
- t.Errorf("expected len(Hits.Hits) = %d; got: %d", 3, len(searchResult.Hits.Hits))
- }
- agg := searchResult.Aggregations
- if agg == nil {
- t.Fatalf("expected Aggregations != nil; got: nil")
- }
-
-	// Searching for a non-existent aggregate should return (nil, false)
- unknownAgg, found := agg.Terms("no-such-aggregate")
- if found {
- t.Errorf("expected unknown aggregation to not be found; got: %v", found)
- }
- if unknownAgg != nil {
- t.Errorf("expected unknown aggregation to return %v; got %v", nil, unknownAgg)
- }
-
- // Global
- globalAggRes, found := agg.Global("global")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if globalAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if globalAggRes.DocCount != 3 {
- t.Errorf("expected DocCount = %d; got: %d", 3, globalAggRes.DocCount)
- }
-
-	// Searching for an existing aggregate (by name) should return (aggregate, true)
- termsAggRes, found := agg.Terms("users")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if termsAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if len(termsAggRes.Buckets) != 2 {
- t.Fatalf("expected %d; got: %d", 2, len(termsAggRes.Buckets))
- }
- if termsAggRes.Buckets[0].Key != "olivere" {
- t.Errorf("expected %q; got: %q", "olivere", termsAggRes.Buckets[0].Key)
- }
- if termsAggRes.Buckets[0].DocCount != 2 {
- t.Errorf("expected %d; got: %d", 2, termsAggRes.Buckets[0].DocCount)
- }
- if termsAggRes.Buckets[1].Key != "sandrae" {
- t.Errorf("expected %q; got: %q", "sandrae", termsAggRes.Buckets[1].Key)
- }
- if termsAggRes.Buckets[1].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, termsAggRes.Buckets[1].DocCount)
- }
-
- // A terms aggregate with keys that are not strings
- retweetsAggRes, found := agg.Terms("retweets")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if retweetsAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if len(retweetsAggRes.Buckets) != 3 {
- t.Fatalf("expected %d; got: %d", 3, len(retweetsAggRes.Buckets))
- }
-
- if retweetsAggRes.Buckets[0].Key != float64(0) {
- t.Errorf("expected %v; got: %v", float64(0), retweetsAggRes.Buckets[0].Key)
- }
- if got, err := retweetsAggRes.Buckets[0].KeyNumber.Int64(); err != nil {
- t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[0].KeyNumber)
- } else if got != 0 {
- t.Errorf("expected %d; got: %d", 0, got)
- }
- if retweetsAggRes.Buckets[0].KeyNumber != "0" {
- t.Errorf("expected %q; got: %q", "0", retweetsAggRes.Buckets[0].KeyNumber)
- }
- if retweetsAggRes.Buckets[0].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[0].DocCount)
- }
-
- if retweetsAggRes.Buckets[1].Key != float64(12) {
- t.Errorf("expected %v; got: %v", float64(12), retweetsAggRes.Buckets[1].Key)
- }
- if got, err := retweetsAggRes.Buckets[1].KeyNumber.Int64(); err != nil {
- t.Errorf("expected %d; got: %v", 12, retweetsAggRes.Buckets[1].KeyNumber)
- } else if got != 12 {
- t.Errorf("expected %d; got: %d", 12, got)
- }
- if retweetsAggRes.Buckets[1].KeyNumber != "12" {
- t.Errorf("expected %q; got: %q", "12", retweetsAggRes.Buckets[1].KeyNumber)
- }
- if retweetsAggRes.Buckets[1].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[1].DocCount)
- }
-
- if retweetsAggRes.Buckets[2].Key != float64(108) {
- t.Errorf("expected %v; got: %v", float64(108), retweetsAggRes.Buckets[2].Key)
- }
- if got, err := retweetsAggRes.Buckets[2].KeyNumber.Int64(); err != nil {
- t.Errorf("expected %d; got: %v", 108, retweetsAggRes.Buckets[2].KeyNumber)
- } else if got != 108 {
- t.Errorf("expected %d; got: %d", 108, got)
- }
- if retweetsAggRes.Buckets[2].KeyNumber != "108" {
- t.Errorf("expected %q; got: %q", "108", retweetsAggRes.Buckets[2].KeyNumber)
- }
- if retweetsAggRes.Buckets[2].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[2].DocCount)
- }
-
- // avgRetweets
- avgAggRes, found := agg.Avg("avgRetweets")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if avgAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if avgAggRes.Value == nil {
- t.Fatalf("expected != nil; got: %v", avgAggRes.Value)
- }
- if *avgAggRes.Value != 40.0 {
- t.Errorf("expected %v; got: %v", 40.0, *avgAggRes.Value)
- }
-
- // avgRetweetsWithMeta
- avgMetaAggRes, found := agg.Avg("avgRetweetsWithMeta")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if avgMetaAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if avgMetaAggRes.Meta == nil {
- t.Fatalf("expected != nil; got: %v", avgMetaAggRes.Meta)
- }
- metaDataValue, found := avgMetaAggRes.Meta["meta"]
- if !found {
- t.Fatalf("expected to return meta data key %q; got: %v", "meta", found)
- }
- if flag, ok := metaDataValue.(bool); !ok {
- t.Fatalf("expected to return meta data key type %T; got: %T", true, metaDataValue)
- } else if flag != true {
- t.Fatalf("expected to return meta data key value %v; got: %v", true, flag)
- }
-
- // minRetweets
- minAggRes, found := agg.Min("minRetweets")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if minAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if minAggRes.Value == nil {
- t.Fatalf("expected != nil; got: %v", minAggRes.Value)
- }
- if *minAggRes.Value != 0.0 {
- t.Errorf("expected %v; got: %v", 0.0, *minAggRes.Value)
- }
-
- // maxRetweets
- maxAggRes, found := agg.Max("maxRetweets")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if maxAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if maxAggRes.Value == nil {
- t.Fatalf("expected != nil; got: %v", maxAggRes.Value)
- }
- if *maxAggRes.Value != 108.0 {
- t.Errorf("expected %v; got: %v", 108.0, *maxAggRes.Value)
- }
-
- // sumRetweets
- sumAggRes, found := agg.Sum("sumRetweets")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if sumAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if sumAggRes.Value == nil {
- t.Fatalf("expected != nil; got: %v", sumAggRes.Value)
- }
- if *sumAggRes.Value != 120.0 {
- t.Errorf("expected %v; got: %v", 120.0, *sumAggRes.Value)
- }
-
- // statsRetweets
- statsAggRes, found := agg.Stats("statsRetweets")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if statsAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if statsAggRes.Count != 3 {
- t.Errorf("expected %d; got: %d", 3, statsAggRes.Count)
- }
- if statsAggRes.Min == nil {
- t.Fatalf("expected != nil; got: %v", statsAggRes.Min)
- }
- if *statsAggRes.Min != 0.0 {
- t.Errorf("expected %v; got: %v", 0.0, *statsAggRes.Min)
- }
- if statsAggRes.Max == nil {
- t.Fatalf("expected != nil; got: %v", statsAggRes.Max)
- }
- if *statsAggRes.Max != 108.0 {
- t.Errorf("expected %v; got: %v", 108.0, *statsAggRes.Max)
- }
- if statsAggRes.Avg == nil {
- t.Fatalf("expected != nil; got: %v", statsAggRes.Avg)
- }
- if *statsAggRes.Avg != 40.0 {
- t.Errorf("expected %v; got: %v", 40.0, *statsAggRes.Avg)
- }
- if statsAggRes.Sum == nil {
- t.Fatalf("expected != nil; got: %v", statsAggRes.Sum)
- }
- if *statsAggRes.Sum != 120.0 {
- t.Errorf("expected %v; got: %v", 120.0, *statsAggRes.Sum)
- }
-
- // extstatsRetweets
- extStatsAggRes, found := agg.ExtendedStats("extstatsRetweets")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if extStatsAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if extStatsAggRes.Count != 3 {
- t.Errorf("expected %d; got: %d", 3, extStatsAggRes.Count)
- }
- if extStatsAggRes.Min == nil {
- t.Fatalf("expected != nil; got: %v", extStatsAggRes.Min)
- }
- if *extStatsAggRes.Min != 0.0 {
- t.Errorf("expected %v; got: %v", 0.0, *extStatsAggRes.Min)
- }
- if extStatsAggRes.Max == nil {
- t.Fatalf("expected != nil; got: %v", extStatsAggRes.Max)
- }
- if *extStatsAggRes.Max != 108.0 {
- t.Errorf("expected %v; got: %v", 108.0, *extStatsAggRes.Max)
- }
- if extStatsAggRes.Avg == nil {
- t.Fatalf("expected != nil; got: %v", extStatsAggRes.Avg)
- }
- if *extStatsAggRes.Avg != 40.0 {
- t.Errorf("expected %v; got: %v", 40.0, *extStatsAggRes.Avg)
- }
- if extStatsAggRes.Sum == nil {
- t.Fatalf("expected != nil; got: %v", extStatsAggRes.Sum)
- }
- if *extStatsAggRes.Sum != 120.0 {
- t.Errorf("expected %v; got: %v", 120.0, *extStatsAggRes.Sum)
- }
- if extStatsAggRes.SumOfSquares == nil {
- t.Fatalf("expected != nil; got: %v", extStatsAggRes.SumOfSquares)
- }
- if *extStatsAggRes.SumOfSquares != 11808.0 {
- t.Errorf("expected %v; got: %v", 11808.0, *extStatsAggRes.SumOfSquares)
- }
- if extStatsAggRes.Variance == nil {
- t.Fatalf("expected != nil; got: %v", extStatsAggRes.Variance)
- }
- if *extStatsAggRes.Variance != 2336.0 {
- t.Errorf("expected %v; got: %v", 2336.0, *extStatsAggRes.Variance)
- }
- if extStatsAggRes.StdDeviation == nil {
- t.Fatalf("expected != nil; got: %v", extStatsAggRes.StdDeviation)
- }
- if *extStatsAggRes.StdDeviation != 48.33218389437829 {
- t.Errorf("expected %v; got: %v", 48.33218389437829, *extStatsAggRes.StdDeviation)
- }
-
- // valueCountRetweets
- valueCountAggRes, found := agg.ValueCount("valueCountRetweets")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if valueCountAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if valueCountAggRes.Value == nil {
- t.Fatalf("expected != nil; got: %v", valueCountAggRes.Value)
- }
- if *valueCountAggRes.Value != 3.0 {
- t.Errorf("expected %v; got: %v", 3.0, *valueCountAggRes.Value)
- }
-
- // percentilesRetweets
- percentilesAggRes, found := agg.Percentiles("percentilesRetweets")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if percentilesAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- // ES 1.4.x returns 7: {"1.0":...}
- // ES 1.5.0 returns 14: {"1.0":..., "1.0_as_string":...}
- // So we're relaxing the test here.
- if len(percentilesAggRes.Values) == 0 {
- t.Errorf("expected at least %d value; got: %d\nValues are: %#v", 1, len(percentilesAggRes.Values), percentilesAggRes.Values)
- }
- if _, found := percentilesAggRes.Values["0.0"]; found {
- t.Errorf("expected %v; got: %v", false, found)
- }
- if percentilesAggRes.Values["1.0"] != 0.24 {
- t.Errorf("expected %v; got: %v", 0.24, percentilesAggRes.Values["1.0"])
- }
- if percentilesAggRes.Values["25.0"] != 6.0 {
- t.Errorf("expected %v; got: %v", 6.0, percentilesAggRes.Values["25.0"])
- }
- if percentilesAggRes.Values["99.0"] != 106.08 {
- t.Errorf("expected %v; got: %v", 106.08, percentilesAggRes.Values["99.0"])
- }
-
- // percentileRanksRetweets
- percentileRanksAggRes, found := agg.PercentileRanks("percentileRanksRetweets")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if percentileRanksAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if len(percentileRanksAggRes.Values) == 0 {
- t.Errorf("expected at least %d value; got %d\nValues are: %#v", 1, len(percentileRanksAggRes.Values), percentileRanksAggRes.Values)
- }
- if _, found := percentileRanksAggRes.Values["0.0"]; found {
- t.Errorf("expected %v; got: %v", false, found)
- }
- if percentileRanksAggRes.Values["25.0"] != 21.180555555555557 {
- t.Errorf("expected %v; got: %v", 21.180555555555557, percentileRanksAggRes.Values["25.0"])
- }
- if percentileRanksAggRes.Values["50.0"] != 29.86111111111111 {
- t.Errorf("expected %v; got: %v", 29.86111111111111, percentileRanksAggRes.Values["50.0"])
- }
- if percentileRanksAggRes.Values["75.0"] != 38.54166666666667 {
- t.Errorf("expected %v; got: %v", 38.54166666666667, percentileRanksAggRes.Values["75.0"])
- }
-
- // usersCardinality
- cardAggRes, found := agg.Cardinality("usersCardinality")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if cardAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if cardAggRes.Value == nil {
- t.Fatalf("expected != nil; got: %v", cardAggRes.Value)
- }
- if *cardAggRes.Value != 2 {
- t.Errorf("expected %v; got: %v", 2, *cardAggRes.Value)
- }
-
- // retweetsFilter
- filterAggRes, found := agg.Filter("retweetsFilter")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if filterAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if filterAggRes.DocCount != 2 {
- t.Fatalf("expected %v; got: %v", 2, filterAggRes.DocCount)
- }
-
- // Retrieve sub-aggregation
- avgRetweetsAggRes, found := filterAggRes.Avg("avgRetweetsSub")
- if !found {
- t.Error("expected sub-aggregation \"avgRetweets\" to be found; got false")
- }
- if avgRetweetsAggRes == nil {
- t.Fatal("expected sub-aggregation \"avgRetweets\"; got nil")
- }
- if avgRetweetsAggRes.Value == nil {
- t.Fatalf("expected != nil; got: %v", avgRetweetsAggRes.Value)
- }
- if *avgRetweetsAggRes.Value != 54.0 {
- t.Errorf("expected %v; got: %v", 54.0, *avgRetweetsAggRes.Value)
- }
-
- // queryFilter
- queryFilterAggRes, found := agg.Filter("queryFilter")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if queryFilterAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if queryFilterAggRes.DocCount != 2 {
- t.Fatalf("expected %v; got: %v", 2, queryFilterAggRes.DocCount)
- }
-
- // significantTerms
- stAggRes, found := agg.SignificantTerms("significantTerms")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if stAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if stAggRes.DocCount != 3 {
- t.Errorf("expected %v; got: %v", 3, stAggRes.DocCount)
- }
- if len(stAggRes.Buckets) != 0 {
- t.Errorf("expected %v; got: %v", 0, len(stAggRes.Buckets))
- }
-
- // sampler
- samplerAggRes, found := agg.Sampler("sample")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if samplerAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if samplerAggRes.DocCount != 3 {
- t.Errorf("expected %v; got: %v", 3, samplerAggRes.DocCount)
- }
- sub, found := samplerAggRes.Aggregations["tagged_with"]
- if !found {
- t.Fatalf("expected sub aggregation %q", "tagged_with")
- }
- if sub == nil {
- t.Fatalf("expected sub aggregation %q; got: %v", "tagged_with", sub)
- }
-
- // retweetsRange
- rangeAggRes, found := agg.Range("retweetsRange")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if rangeAggRes == nil {
- t.Fatal("expected != nil; got: nil")
- }
- if len(rangeAggRes.Buckets) != 3 {
- t.Fatalf("expected %d; got: %d", 3, len(rangeAggRes.Buckets))
- }
- if rangeAggRes.Buckets[0].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[0].DocCount)
- }
- if rangeAggRes.Buckets[1].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[1].DocCount)
- }
- if rangeAggRes.Buckets[2].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[2].DocCount)
- }
-
- // retweetsKeyedRange
- keyedRangeAggRes, found := agg.KeyedRange("retweetsKeyedRange")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if keyedRangeAggRes == nil {
- t.Fatal("expected != nil; got: nil")
- }
- if len(keyedRangeAggRes.Buckets) != 3 {
- t.Fatalf("expected %d; got: %d", 3, len(keyedRangeAggRes.Buckets))
- }
- _, found = keyedRangeAggRes.Buckets["no-such-key"]
- if found {
- t.Fatalf("expected bucket to not be found; got: %v", found)
- }
- bucket, found := keyedRangeAggRes.Buckets["*-10.0"]
- if !found {
- t.Fatalf("expected bucket to be found; got: %v", found)
- }
- if bucket.DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, bucket.DocCount)
- }
- bucket, found = keyedRangeAggRes.Buckets["10.0-100.0"]
- if !found {
- t.Fatalf("expected bucket to be found; got: %v", found)
- }
- if bucket.DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, bucket.DocCount)
- }
- bucket, found = keyedRangeAggRes.Buckets["100.0-*"]
- if !found {
- t.Fatalf("expected bucket to be found; got: %v", found)
- }
- if bucket.DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, bucket.DocCount)
- }
-
- // dateRange
- dateRangeRes, found := agg.DateRange("dateRange")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if dateRangeRes == nil {
- t.Fatal("expected != nil; got: nil")
- }
- if dateRangeRes.Buckets[0].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, dateRangeRes.Buckets[0].DocCount)
- }
- if dateRangeRes.Buckets[0].From != nil {
- t.Fatal("expected From to be nil")
- }
- if dateRangeRes.Buckets[0].To == nil {
- t.Fatal("expected To to be != nil")
- }
- if *dateRangeRes.Buckets[0].To != 1.325376e+12 {
- t.Errorf("expected %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[0].To)
- }
- if dateRangeRes.Buckets[0].ToAsString != "2012-01-01T00:00:00.000Z" {
- t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[0].ToAsString)
- }
- if dateRangeRes.Buckets[1].DocCount != 2 {
- t.Errorf("expected %d; got: %d", 2, dateRangeRes.Buckets[1].DocCount)
- }
- if dateRangeRes.Buckets[1].From == nil {
- t.Fatal("expected From to be != nil")
- }
- if *dateRangeRes.Buckets[1].From != 1.325376e+12 {
- t.Errorf("expected From = %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[1].From)
- }
- if dateRangeRes.Buckets[1].FromAsString != "2012-01-01T00:00:00.000Z" {
- t.Errorf("expected FromAsString = %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].FromAsString)
- }
- if dateRangeRes.Buckets[1].To == nil {
- t.Fatal("expected To to be != nil")
- }
- if *dateRangeRes.Buckets[1].To != 1.3569984e+12 {
- t.Errorf("expected To = %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[1].To)
- }
- if dateRangeRes.Buckets[1].ToAsString != "2013-01-01T00:00:00.000Z" {
- t.Errorf("expected ToAsString = %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].ToAsString)
- }
- if dateRangeRes.Buckets[2].DocCount != 0 {
- t.Errorf("expected %d; got: %d", 0, dateRangeRes.Buckets[2].DocCount)
- }
- if dateRangeRes.Buckets[2].To != nil {
- t.Fatal("expected To to be nil")
- }
- if dateRangeRes.Buckets[2].From == nil {
- t.Fatal("expected From to be != nil")
- }
- if *dateRangeRes.Buckets[2].From != 1.3569984e+12 {
- t.Errorf("expected %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[2].From)
- }
- if dateRangeRes.Buckets[2].FromAsString != "2013-01-01T00:00:00.000Z" {
- t.Errorf("expected %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[2].FromAsString)
- }
-
- // missingTags
- missingRes, found := agg.Missing("missingTags")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if missingRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if missingRes.DocCount != 0 {
- t.Errorf("expected searchResult.Aggregations[\"missingTags\"].DocCount = %v; got %v", 0, missingRes.DocCount)
- }
-
- // retweetsHisto
- histoRes, found := agg.Histogram("retweetsHisto")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if histoRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if len(histoRes.Buckets) != 2 {
- t.Fatalf("expected %d; got: %d", 2, len(histoRes.Buckets))
- }
- if histoRes.Buckets[0].DocCount != 2 {
- t.Errorf("expected %d; got: %d", 2, histoRes.Buckets[0].DocCount)
- }
- if histoRes.Buckets[0].Key != 0.0 {
- t.Errorf("expected %v; got: %v", 0.0, histoRes.Buckets[0].Key)
- }
- if histoRes.Buckets[1].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, histoRes.Buckets[1].DocCount)
- }
- if histoRes.Buckets[1].Key != 100.0 {
- t.Errorf("expected %v; got: %+v", 100.0, histoRes.Buckets[1].Key)
- }
-
- // dateHisto
- dateHistoRes, found := agg.DateHistogram("dateHisto")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if dateHistoRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if len(dateHistoRes.Buckets) != 2 {
- t.Fatalf("expected %d; got: %d", 2, len(dateHistoRes.Buckets))
- }
- if dateHistoRes.Buckets[0].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, dateHistoRes.Buckets[0].DocCount)
- }
- if dateHistoRes.Buckets[0].Key != 1.29384e+12 {
- t.Errorf("expected %v; got: %v", 1.29384e+12, dateHistoRes.Buckets[0].Key)
- }
- if dateHistoRes.Buckets[0].KeyAsString == nil {
- t.Fatalf("expected != nil; got: %v", dateHistoRes.Buckets[0].KeyAsString)
- }
- if *dateHistoRes.Buckets[0].KeyAsString != "2011-01-01T00:00:00.000Z" {
- t.Errorf("expected %q; got: %q", "2011-01-01T00:00:00.000Z", *dateHistoRes.Buckets[0].KeyAsString)
- }
- if dateHistoRes.Buckets[1].DocCount != 2 {
- t.Errorf("expected %d; got: %d", 2, dateHistoRes.Buckets[1].DocCount)
- }
- if dateHistoRes.Buckets[1].Key != 1.325376e+12 {
- t.Errorf("expected %v; got: %v", 1.325376e+12, dateHistoRes.Buckets[1].Key)
- }
- if dateHistoRes.Buckets[1].KeyAsString == nil {
- t.Fatalf("expected != nil; got: %v", dateHistoRes.Buckets[1].KeyAsString)
- }
- if *dateHistoRes.Buckets[1].KeyAsString != "2012-01-01T00:00:00.000Z" {
- t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", *dateHistoRes.Buckets[1].KeyAsString)
- }
-
- // topHits
- topTags, found := agg.Terms("top-tags")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if topTags == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if topTags.DocCountErrorUpperBound != 0 {
- t.Errorf("expected %v; got: %v", 0, topTags.DocCountErrorUpperBound)
- }
- if topTags.SumOfOtherDocCount != 1 {
- t.Errorf("expected %v; got: %v", 1, topTags.SumOfOtherDocCount)
- }
- if len(topTags.Buckets) != 3 {
- t.Fatalf("expected %d; got: %d", 3, len(topTags.Buckets))
- }
- if topTags.Buckets[0].DocCount != 2 {
- t.Errorf("expected %d; got: %d", 2, topTags.Buckets[0].DocCount)
- }
- if topTags.Buckets[0].Key != "golang" {
- t.Errorf("expected %v; got: %v", "golang", topTags.Buckets[0].Key)
- }
- topHits, found := topTags.Buckets[0].TopHits("top_tag_hits")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if topHits == nil {
- t.Fatal("expected != nil; got: nil")
- }
- if topHits.Hits == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if topHits.Hits.TotalHits != 2 {
- t.Errorf("expected %d; got: %d", 2, topHits.Hits.TotalHits)
- }
- if topHits.Hits.Hits == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if len(topHits.Hits.Hits) != 2 {
- t.Fatalf("expected %d; got: %d", 2, len(topHits.Hits.Hits))
- }
- hit := topHits.Hits.Hits[0]
- if hit == nil {
- t.Fatal("expected != nil; got: nil")
- }
- var tw tweet
- if err := json.Unmarshal(*hit.Source, &tw); err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if tw.Message != "Welcome to Golang and Elasticsearch." {
- t.Errorf("expected %q; got: %q", "Welcome to Golang and Elasticsearch.", tw.Message)
- }
- if topTags.Buckets[1].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, topTags.Buckets[1].DocCount)
- }
- if topTags.Buckets[1].Key != "cycling" {
- t.Errorf("expected %v; got: %v", "cycling", topTags.Buckets[1].Key)
- }
- topHits, found = topTags.Buckets[1].TopHits("top_tag_hits")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if topHits == nil {
- t.Fatal("expected != nil; got: nil")
- }
- if topHits.Hits == nil {
- t.Fatal("expected != nil; got: nil")
- }
- if topHits.Hits.TotalHits != 1 {
- t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits)
- }
- if topTags.Buckets[2].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, topTags.Buckets[2].DocCount)
- }
- if topTags.Buckets[2].Key != "elasticsearch" {
- t.Errorf("expected %v; got: %v", "elasticsearch", topTags.Buckets[2].Key)
- }
- topHits, found = topTags.Buckets[2].TopHits("top_tag_hits")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if topHits == nil {
- t.Fatal("expected != nil; got: nil")
- }
- if topHits.Hits == nil {
- t.Fatal("expected != nil; got: nil")
- }
- if topHits.Hits.TotalHits != 1 {
- t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits)
- }
-
- // viewport via geo_bounds (1.3.0 has an error in that it doesn't output the aggregation name)
- geoBoundsRes, found := agg.GeoBounds("viewport")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if geoBoundsRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
-
- // geohashed via geohash
- geoHashRes, found := agg.GeoHash("geohashed")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if geoHashRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
-
- // Filters agg "countByUser" (unnamed)
- countByUserAggRes, found := agg.Filters("countByUser")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if countByUserAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if len(countByUserAggRes.Buckets) != 2 {
- t.Fatalf("expected %d; got: %d", 2, len(countByUserAggRes.Buckets))
- }
- if len(countByUserAggRes.NamedBuckets) != 0 {
- t.Fatalf("expected %d; got: %d", 0, len(countByUserAggRes.NamedBuckets))
- }
- if countByUserAggRes.Buckets[0].DocCount != 2 {
- t.Errorf("expected %d; got: %d", 2, countByUserAggRes.Buckets[0].DocCount)
- }
- if countByUserAggRes.Buckets[1].DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, countByUserAggRes.Buckets[1].DocCount)
- }
-
- // Filters agg "countByUser2" (named)
- countByUser2AggRes, found := agg.Filters("countByUser2")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if countByUser2AggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if len(countByUser2AggRes.Buckets) != 0 {
- t.Fatalf("expected %d; got: %d", 0, len(countByUser2AggRes.Buckets))
- }
- if len(countByUser2AggRes.NamedBuckets) != 2 {
- t.Fatalf("expected %d; got: %d", 2, len(countByUser2AggRes.NamedBuckets))
- }
- b, found := countByUser2AggRes.NamedBuckets["olivere"]
- if !found {
- t.Fatalf("expected bucket %q; got: %v", "olivere", found)
- }
- if b == nil {
- t.Fatalf("expected bucket %q; got: %v", "olivere", b)
- }
- if b.DocCount != 2 {
- t.Errorf("expected %d; got: %d", 2, b.DocCount)
- }
- b, found = countByUser2AggRes.NamedBuckets["sandrae"]
- if !found {
- t.Fatalf("expected bucket %q; got: %v", "sandrae", found)
- }
- if b == nil {
- t.Fatalf("expected bucket %q; got: %v", "sandrae", b)
- }
- if b.DocCount != 1 {
- t.Errorf("expected %d; got: %d", 1, b.DocCount)
- }
-
- compositeAggRes, found := agg.Composite("composite")
- if !found {
- t.Errorf("expected %v; got: %v", true, found)
- }
- if compositeAggRes == nil {
- t.Fatalf("expected != nil; got: nil")
- }
- if want, have := 3, len(compositeAggRes.Buckets); want != have {
- t.Fatalf("expected %d; got: %d", want, have)
- }
-}
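The assertions above all rely on the same comma-ok convention: every typed accessor on Aggregations (Terms, Avg, Filters, Composite, and so on) returns a result plus a found flag, and an unknown name yields (nil, false) rather than an error. A minimal sketch of that pattern outside a test, assuming searchResult is the value returned by a Search(...).Do(ctx) call and that fmt and log are imported:

// Sketch only: the (result, found) lookup convention exercised by the test above.
if terms, found := searchResult.Aggregations.Terms("users"); found {
	for _, bucket := range terms.Buckets {
		fmt.Printf("user=%v docs=%d\n", bucket.Key, bucket.DocCount)
	}
} else {
	// A missing name is not an error; the accessor simply reports found == false.
	log.Println(`aggregation "users" not present in this response`)
}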
-
-// TestAggsMarshal ensures that marshaling aggregations back into a string
-// does not yield base64 encoded data. See https://github.com/olivere/elastic/issues/51
-// and https://groups.google.com/forum/#!topic/Golang-Nuts/38ShOlhxAYY for details.
-func TestAggsMarshal(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{
- User: "olivere",
- Retweets: 108,
- Message: "Welcome to Golang and Elasticsearch.",
- Image: "http://golang.org/doc/gopher/gophercolor.png",
- Tags: []string{"golang", "elasticsearch"},
- Location: "48.1333,11.5667", // lat,lon
- Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Match all should return all documents
- all := NewMatchAllQuery()
- dhagg := NewDateHistogramAggregation().Field("created").Interval("year")
-
- // Run query
- builder := client.Search().Index(testIndexName).Query(all)
- builder = builder.Aggregation("dhagg", dhagg)
- searchResult, err := builder.Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.TotalHits() != 1 {
- t.Errorf("expected Hits.TotalHits = %d; got: %d", 1, searchResult.TotalHits())
- }
- if _, found := searchResult.Aggregations["dhagg"]; !found {
- t.Fatalf("expected aggregation %q", "dhagg")
- }
- buf, err := json.Marshal(searchResult)
- if err != nil {
- t.Fatal(err)
- }
- s := string(buf)
- if i := strings.Index(s, `{"dhagg":{"buckets":[{"key_as_string":"2012-01-01`); i < 0 {
- t.Errorf("expected to serialize aggregation into string; got: %v", s)
- }
-}
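The regression this test guards against comes down to how the raw aggregation payload is carried: json.RawMessage re-serializes its bytes verbatim, whereas a plain []byte field is base64-encoded by encoding/json, which is what issue #51 reported. A standalone sketch of that difference using only the standard library (the struct names here are hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

type withRaw struct {
	Agg json.RawMessage `json:"agg"` // re-emitted verbatim on marshal
}

type withBytes struct {
	Agg []byte `json:"agg"` // base64-encoded on marshal
}

func main() {
	raw := []byte(`{"buckets":[]}`)
	b1, _ := json.Marshal(withRaw{Agg: raw})
	b2, _ := json.Marshal(withBytes{Agg: raw})
	fmt.Println(string(b1)) // {"agg":{"buckets":[]}}
	fmt.Println(string(b2)) // {"agg":"eyJidWNrZXRzIjpbXX0="}
}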
-
-func TestAggsMetricsMin(t *testing.T) {
- s := `{
- "min_price": {
- "value": 10
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Min("min_price")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(10) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value)
- }
-}
-
-func TestAggsMetricsMax(t *testing.T) {
- s := `{
- "max_price": {
- "value": 35
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Max("max_price")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(35) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(35), *agg.Value)
- }
-}
-
-func TestAggsMetricsSum(t *testing.T) {
- s := `{
- "intraday_return": {
- "value": 2.18
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Sum("intraday_return")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(2.18) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(2.18), *agg.Value)
- }
-}
-
-func TestAggsMetricsAvg(t *testing.T) {
- s := `{
- "avg_grade": {
- "value": 75
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Avg("avg_grade")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(75) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(75), *agg.Value)
- }
-}
-
-func TestAggsMetricsValueCount(t *testing.T) {
- s := `{
- "grades_count": {
- "value": 10
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.ValueCount("grades_count")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(10) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value)
- }
-}
-
-func TestAggsMetricsCardinality(t *testing.T) {
- s := `{
- "author_count": {
- "value": 12
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Cardinality("author_count")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(12) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(12), *agg.Value)
- }
-}
-
-func TestAggsMetricsStats(t *testing.T) {
- s := `{
- "grades_stats": {
- "count": 6,
- "min": 60,
- "max": 98,
- "avg": 78.5,
- "sum": 471
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Stats("grades_stats")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Count != int64(6) {
- t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count)
- }
- if agg.Min == nil {
- t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min)
- }
- if *agg.Min != float64(60) {
- t.Fatalf("expected aggregation Min = %v; got: %v", float64(60), *agg.Min)
- }
- if agg.Max == nil {
- t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max)
- }
- if *agg.Max != float64(98) {
- t.Fatalf("expected aggregation Max = %v; got: %v", float64(98), *agg.Max)
- }
- if agg.Avg == nil {
- t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg)
- }
- if *agg.Avg != float64(78.5) {
- t.Fatalf("expected aggregation Avg = %v; got: %v", float64(78.5), *agg.Avg)
- }
- if agg.Sum == nil {
- t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum)
- }
- if *agg.Sum != float64(471) {
- t.Fatalf("expected aggregation Sum = %v; got: %v", float64(471), *agg.Sum)
- }
-}
-
-func TestAggsMetricsExtendedStats(t *testing.T) {
- s := `{
- "grades_stats": {
- "count": 6,
- "min": 72,
- "max": 117.6,
- "avg": 94.2,
- "sum": 565.2,
- "sum_of_squares": 54551.51999999999,
- "variance": 218.2799999999976,
- "std_deviation": 14.774302013969987
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.ExtendedStats("grades_stats")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Count != int64(6) {
- t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count)
- }
- if agg.Min == nil {
- t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min)
- }
- if *agg.Min != float64(72) {
- t.Fatalf("expected aggregation Min = %v; got: %v", float64(72), *agg.Min)
- }
- if agg.Max == nil {
- t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max)
- }
- if *agg.Max != float64(117.6) {
- t.Fatalf("expected aggregation Max = %v; got: %v", float64(117.6), *agg.Max)
- }
- if agg.Avg == nil {
- t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg)
- }
- if *agg.Avg != float64(94.2) {
- t.Fatalf("expected aggregation Avg = %v; got: %v", float64(94.2), *agg.Avg)
- }
- if agg.Sum == nil {
- t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum)
- }
- if *agg.Sum != float64(565.2) {
- t.Fatalf("expected aggregation Sum = %v; got: %v", float64(565.2), *agg.Sum)
- }
- if agg.SumOfSquares == nil {
- t.Fatalf("expected aggregation sum_of_squares != nil; got: %v", agg.SumOfSquares)
- }
- if *agg.SumOfSquares != float64(54551.51999999999) {
- t.Fatalf("expected aggregation sum_of_squares = %v; got: %v", float64(54551.51999999999), *agg.SumOfSquares)
- }
- if agg.Variance == nil {
- t.Fatalf("expected aggregation Variance != nil; got: %v", agg.Variance)
- }
- if *agg.Variance != float64(218.2799999999976) {
- t.Fatalf("expected aggregation Variance = %v; got: %v", float64(218.2799999999976), *agg.Variance)
- }
- if agg.StdDeviation == nil {
- t.Fatalf("expected aggregation StdDeviation != nil; got: %v", agg.StdDeviation)
- }
- if *agg.StdDeviation != float64(14.774302013969987) {
- t.Fatalf("expected aggregation StdDeviation = %v; got: %v", float64(14.774302013969987), *agg.StdDeviation)
- }
-}
-
-func TestAggsMatrixStats(t *testing.T) {
- s := `{
- "matrixstats": {
- "fields": [{
- "name": "income",
- "count": 50,
- "mean": 51985.1,
- "variance": 7.383377037755103E7,
- "skewness": 0.5595114003506483,
- "kurtosis": 2.5692365287787124,
- "covariance": {
- "income": 7.383377037755103E7,
- "poverty": -21093.65836734694
- },
- "correlation": {
- "income": 1.0,
- "poverty": -0.8352655256272504
- }
- }, {
- "name": "poverty",
- "count": 51,
- "mean": 12.732000000000001,
- "variance": 8.637730612244896,
- "skewness": 0.4516049811903419,
- "kurtosis": 2.8615929677997767,
- "covariance": {
- "income": -21093.65836734694,
- "poverty": 8.637730612244896
- },
- "correlation": {
- "income": -0.8352655256272504,
- "poverty": 1.0
- }
- }]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.MatrixStats("matrixstats")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if want, got := 2, len(agg.Fields); want != got {
- t.Fatalf("expected aggregation len(Fields) = %v; got: %v", want, got)
- }
- field := agg.Fields[0]
- if want, got := "income", field.Name; want != got {
- t.Fatalf("expected aggregation field name == %q; got: %q", want, got)
- }
- if want, got := int64(50), field.Count; want != got {
- t.Fatalf("expected aggregation field count == %v; got: %v", want, got)
- }
- if want, got := 51985.1, field.Mean; want != got {
- t.Fatalf("expected aggregation field mean == %v; got: %v", want, got)
- }
- if want, got := 7.383377037755103e7, field.Variance; want != got {
- t.Fatalf("expected aggregation field variance == %v; got: %v", want, got)
- }
- if want, got := 0.5595114003506483, field.Skewness; want != got {
- t.Fatalf("expected aggregation field skewness == %v; got: %v", want, got)
- }
- if want, got := 2.5692365287787124, field.Kurtosis; want != got {
- t.Fatalf("expected aggregation field kurtosis == %v; got: %v", want, got)
- }
- if field.Covariance == nil {
- t.Fatalf("expected aggregation field covariance != nil; got: %v", nil)
- }
- if want, got := 7.383377037755103e7, field.Covariance["income"]; want != got {
- t.Fatalf("expected aggregation field covariance == %v; got: %v", want, got)
- }
- if want, got := -21093.65836734694, field.Covariance["poverty"]; want != got {
- t.Fatalf("expected aggregation field covariance == %v; got: %v", want, got)
- }
- if field.Correlation == nil {
- t.Fatalf("expected aggregation field correlation != nil; got: %v", nil)
- }
- if want, got := 1.0, field.Correlation["income"]; want != got {
- t.Fatalf("expected aggregation field correlation == %v; got: %v", want, got)
- }
- if want, got := -0.8352655256272504, field.Correlation["poverty"]; want != got {
- t.Fatalf("expected aggregation field correlation == %v; got: %v", want, got)
- }
- field = agg.Fields[1]
- if want, got := "poverty", field.Name; want != got {
- t.Fatalf("expected aggregation field name == %q; got: %q", want, got)
- }
- if want, got := int64(51), field.Count; want != got {
- t.Fatalf("expected aggregation field count == %v; got: %v", want, got)
- }
-}
-
-func TestAggsMetricsPercentiles(t *testing.T) {
- s := `{
- "load_time_outlier": {
- "values" : {
- "1.0": 15,
- "5.0": 20,
- "25.0": 23,
- "50.0": 25,
- "75.0": 29,
- "95.0": 60,
- "99.0": 150
- }
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Percentiles("load_time_outlier")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Values == nil {
- t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values)
- }
- if len(agg.Values) != 7 {
- t.Fatalf("expected %d aggregation Values; got: %d", 7, len(agg.Values))
- }
- if agg.Values["1.0"] != float64(15) {
- t.Errorf("expected aggregation value for \"1.0\" = %v; got: %v", float64(15), agg.Values["1.0"])
- }
- if agg.Values["5.0"] != float64(20) {
- t.Errorf("expected aggregation value for \"5.0\" = %v; got: %v", float64(20), agg.Values["5.0"])
- }
- if agg.Values["25.0"] != float64(23) {
- t.Errorf("expected aggregation value for \"25.0\" = %v; got: %v", float64(23), agg.Values["25.0"])
- }
- if agg.Values["50.0"] != float64(25) {
- t.Errorf("expected aggregation value for \"50.0\" = %v; got: %v", float64(25), agg.Values["50.0"])
- }
- if agg.Values["75.0"] != float64(29) {
- t.Errorf("expected aggregation value for \"75.0\" = %v; got: %v", float64(29), agg.Values["75.0"])
- }
- if agg.Values["95.0"] != float64(60) {
- t.Errorf("expected aggregation value for \"95.0\" = %v; got: %v", float64(60), agg.Values["95.0"])
- }
- if agg.Values["99.0"] != float64(150) {
- t.Errorf("expected aggregation value for \"99.0\" = %v; got: %v", float64(150), agg.Values["99.0"])
- }
-}
-
-func TestAggsMetricsPercentileRanks(t *testing.T) {
- s := `{
- "load_time_outlier": {
- "values" : {
- "15": 92,
- "30": 100
- }
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.PercentileRanks("load_time_outlier")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Values == nil {
- t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values)
- }
- if len(agg.Values) != 2 {
- t.Fatalf("expected %d aggregation Values; got: %d", 2, len(agg.Values))
- }
- if agg.Values["15"] != float64(92) {
- t.Errorf("expected aggregation value for \"15\" = %v; got: %v", float64(92), agg.Values["15"])
- }
- if agg.Values["30"] != float64(100) {
- t.Errorf("expected aggregation value for \"30\" = %v; got: %v", float64(100), agg.Values["30"])
- }
-}
-
-func TestAggsMetricsTopHits(t *testing.T) {
- s := `{
- "top-tags": {
- "buckets": [
- {
- "key": "windows-7",
- "doc_count": 25365,
- "top_tags_hits": {
- "hits": {
- "total": 25365,
- "max_score": 1,
- "hits": [
- {
- "_index": "stack",
- "_type": "question",
- "_id": "602679",
- "_score": 1,
- "_source": {
- "title": "Windows port opening"
- },
- "sort": [
- 1370143231177
- ]
- }
- ]
- }
- }
- },
- {
- "key": "linux",
- "doc_count": 18342,
- "top_tags_hits": {
- "hits": {
- "total": 18342,
- "max_score": 1,
- "hits": [
- {
- "_index": "stack",
- "_type": "question",
- "_id": "602672",
- "_score": 1,
- "_source": {
- "title": "Ubuntu RFID Screensaver lock-unlock"
- },
- "sort": [
- 1370143379747
- ]
- }
- ]
- }
- }
- },
- {
- "key": "windows",
- "doc_count": 18119,
- "top_tags_hits": {
- "hits": {
- "total": 18119,
- "max_score": 1,
- "hits": [
- {
- "_index": "stack",
- "_type": "question",
- "_id": "602678",
- "_score": 1,
- "_source": {
- "title": "If I change my computers date / time, what could be affected?"
- },
- "sort": [
- 1370142868283
- ]
- }
- ]
- }
- }
- }
- ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Terms("top-tags")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
- }
- if len(agg.Buckets) != 3 {
- t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
- }
- if agg.Buckets[0].Key != "windows-7" {
- t.Errorf("expected bucket key = %q; got: %q", "windows-7", agg.Buckets[0].Key)
- }
- if agg.Buckets[1].Key != "linux" {
- t.Errorf("expected bucket key = %q; got: %q", "linux", agg.Buckets[1].Key)
- }
- if agg.Buckets[2].Key != "windows" {
- t.Errorf("expected bucket key = %q; got: %q", "windows", agg.Buckets[2].Key)
- }
-
- // Sub-aggregation of top-hits
- subAgg, found := agg.Buckets[0].TopHits("top_tags_hits")
- if !found {
- t.Fatalf("expected sub aggregation to be found; got: %v", found)
- }
- if subAgg == nil {
- t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)
- }
- if subAgg.Hits == nil {
- t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)
- }
- if subAgg.Hits.TotalHits != 25365 {
- t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 25365, subAgg.Hits.TotalHits)
- }
- if subAgg.Hits.MaxScore == nil {
- t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, subAgg.Hits.MaxScore)
- }
- if *subAgg.Hits.MaxScore != float64(1.0) {
- t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)
- }
-
- subAgg, found = agg.Buckets[1].TopHits("top_tags_hits")
- if !found {
- t.Fatalf("expected sub aggregation to be found; got: %v", found)
- }
- if subAgg == nil {
- t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)
- }
- if subAgg.Hits == nil {
- t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)
- }
- if subAgg.Hits.TotalHits != 18342 {
- t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18342, subAgg.Hits.TotalHits)
- }
- if subAgg.Hits.MaxScore == nil {
- t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, subAgg.Hits.MaxScore)
- }
- if *subAgg.Hits.MaxScore != float64(1.0) {
- t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)
- }
-
- subAgg, found = agg.Buckets[2].TopHits("top_tags_hits")
- if !found {
- t.Fatalf("expected sub aggregation to be found; got: %v", found)
- }
- if subAgg == nil {
- t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)
- }
- if subAgg.Hits == nil {
- t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)
- }
- if subAgg.Hits.TotalHits != 18119 {
- t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18119, subAgg.Hits.TotalHits)
- }
- if subAgg.Hits.MaxScore == nil {
- t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, subAgg.Hits.MaxScore)
- }
- if *subAgg.Hits.MaxScore != float64(1.0) {
- t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)
- }
-}
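Outside a test, the same fixture would typically be consumed by walking the terms buckets and unmarshaling each top_hits document; a sketch under the assumption that aggs was decoded as above and that the hit source carries a title field:

// Sketch only: walking a terms aggregation and decoding its top_hits documents.
if terms, found := aggs.Terms("top-tags"); found {
	for _, bucket := range terms.Buckets {
		topHits, ok := bucket.TopHits("top_tags_hits")
		if !ok || topHits.Hits == nil {
			continue
		}
		for _, hit := range topHits.Hits.Hits {
			var doc struct {
				Title string `json:"title"`
			}
			if err := json.Unmarshal(*hit.Source, &doc); err != nil {
				continue
			}
			fmt.Printf("%v: %s\n", bucket.Key, doc.Title)
		}
	}
}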
-
-func TestAggsBucketGlobal(t *testing.T) {
- s := `{
- "all_products" : {
- "doc_count" : 100,
- "avg_price" : {
- "value" : 56.3
- }
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Global("all_products")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.DocCount != 100 {
- t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount)
- }
-
- // Sub-aggregation
- subAgg, found := agg.Avg("avg_price")
- if !found {
- t.Fatalf("expected sub-aggregation to be found; got: %v", found)
- }
- if subAgg == nil {
- t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg)
- }
- if subAgg.Value == nil {
- t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value)
- }
- if *subAgg.Value != float64(56.3) {
- t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value)
- }
-}
-
-func TestAggsBucketFilter(t *testing.T) {
- s := `{
- "in_stock_products" : {
- "doc_count" : 100,
- "avg_price" : { "value" : 56.3 }
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Filter("in_stock_products")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.DocCount != 100 {
- t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount)
- }
-
- // Sub-aggregation
- subAgg, found := agg.Avg("avg_price")
- if !found {
- t.Fatalf("expected sub-aggregation to be found; got: %v", found)
- }
- if subAgg == nil {
- t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg)
- }
- if subAgg.Value == nil {
- t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value)
- }
- if *subAgg.Value != float64(56.3) {
- t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value)
- }
-}
-
-func TestAggsBucketFiltersWithBuckets(t *testing.T) {
- s := `{
- "messages" : {
- "buckets" : [
- {
- "doc_count" : 34,
- "monthly" : {
- "buckets" : []
- }
- },
- {
- "doc_count" : 439,
- "monthly" : {
- "buckets" : []
- }
- }
- ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Filters("messages")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.Buckets)
- }
- if len(agg.Buckets) != 2 {
- t.Fatalf("expected %d buckets; got: %d", 2, len(agg.Buckets))
- }
-
- if agg.Buckets[0].DocCount != 34 {
- t.Fatalf("expected DocCount = %d; got: %d", 34, agg.Buckets[0].DocCount)
- }
- subAgg, found := agg.Buckets[0].Histogram("monthly")
- if !found {
- t.Fatalf("expected sub aggregation to be found; got: %v", found)
- }
- if subAgg == nil {
- t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)
- }
-
- if agg.Buckets[1].DocCount != 439 {
- t.Fatalf("expected DocCount = %d; got: %d", 439, agg.Buckets[1].DocCount)
- }
- subAgg, found = agg.Buckets[1].Histogram("monthly")
- if !found {
- t.Fatalf("expected sub aggregation to be found; got: %v", found)
- }
- if subAgg == nil {
- t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)
- }
-}
-
-func TestAggsBucketFiltersWithNamedBuckets(t *testing.T) {
- s := `{
- "messages" : {
- "buckets" : {
- "errors" : {
- "doc_count" : 34,
- "monthly" : {
- "buckets" : []
- }
- },
- "warnings" : {
- "doc_count" : 439,
- "monthly" : {
- "buckets" : []
- }
- }
- }
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Filters("messages")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.NamedBuckets == nil {
- t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.NamedBuckets)
- }
- if len(agg.NamedBuckets) != 2 {
- t.Fatalf("expected %d buckets; got: %d", 2, len(agg.NamedBuckets))
- }
-
- if agg.NamedBuckets["errors"].DocCount != 34 {
- t.Fatalf("expected DocCount = %d; got: %d", 34, agg.NamedBuckets["errors"].DocCount)
- }
- subAgg, found := agg.NamedBuckets["errors"].Histogram("monthly")
- if !found {
- t.Fatalf("expected sub aggregation to be found; got: %v", found)
- }
- if subAgg == nil {
- t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)
- }
-
- if agg.NamedBuckets["warnings"].DocCount != 439 {
- t.Fatalf("expected DocCount = %d; got: %d", 439, agg.NamedBuckets["warnings"].DocCount)
- }
- subAgg, found = agg.NamedBuckets["warnings"].Histogram("monthly")
- if !found {
- t.Fatalf("expected sub aggregation to be found; got: %v", found)
- }
- if subAgg == nil {
- t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)
- }
-}
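The two tests above decode the anonymous and the named response shapes of a filters aggregation, which land in Buckets and NamedBuckets respectively. For reference, the request side that produces each shape looks roughly like this; a sketch in which the client value, the logs index, and the level field are placeholders, using the Filter and FilterWithName builder methods:

// Sketch only: anonymous filters come back as "buckets": [...], named filters as "buckets": {...}.
anon := NewFiltersAggregation().
	Filter(NewTermQuery("level", "error")).
	Filter(NewTermQuery("level", "warning"))
named := NewFiltersAggregation().
	FilterWithName("errors", NewTermQuery("level", "error")).
	FilterWithName("warnings", NewTermQuery("level", "warning"))
builder := client.Search().Index("logs").Size(0).
	Aggregation("messages", anon).
	Aggregation("messagesByName", named)
_ = builder // builder.Do(ctx) would return both shapes under their aggregation names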
-
-func TestAggsBucketMissing(t *testing.T) {
- s := `{
- "products_without_a_price" : {
- "doc_count" : 10
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Missing("products_without_a_price")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.DocCount != 10 {
- t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)
- }
-}
-
-func TestAggsBucketNested(t *testing.T) {
- s := `{
- "resellers": {
- "min_price": {
- "value" : 350
- }
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Nested("resellers")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.DocCount != 0 {
- t.Fatalf("expected aggregation DocCount = %d; got: %d", 0, agg.DocCount)
- }
-
- // Sub-aggregation
- subAgg, found := agg.Avg("min_price")
- if !found {
- t.Fatalf("expected sub-aggregation to be found; got: %v", found)
- }
- if subAgg == nil {
- t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg)
- }
- if subAgg.Value == nil {
- t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value)
- }
- if *subAgg.Value != float64(350) {
- t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(350), *subAgg.Value)
- }
-}
-
-func TestAggsBucketReverseNested(t *testing.T) {
- s := `{
- "comment_to_issue": {
- "doc_count" : 10
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.ReverseNested("comment_to_issue")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.DocCount != 10 {
- t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)
- }
-}
-
-func TestAggsBucketChildren(t *testing.T) {
- s := `{
- "to-answers": {
- "doc_count" : 10
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Children("to-answers")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.DocCount != 10 {
- t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)
- }
-}
-
-func TestAggsBucketTerms(t *testing.T) {
- s := `{
- "users" : {
- "doc_count_error_upper_bound" : 1,
- "sum_other_doc_count" : 2,
- "buckets" : [ {
- "key" : "olivere",
- "doc_count" : 2
- }, {
- "key" : "sandrae",
- "doc_count" : 1
- } ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Terms("users")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
- }
- if len(agg.Buckets) != 2 {
- t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
- }
- if agg.Buckets[0].Key != "olivere" {
- t.Errorf("expected key %q; got: %q", "olivere", agg.Buckets[0].Key)
- }
- if agg.Buckets[0].DocCount != 2 {
- t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
- }
- if agg.Buckets[1].Key != "sandrae" {
- t.Errorf("expected key %q; got: %q", "sandrae", agg.Buckets[1].Key)
- }
- if agg.Buckets[1].DocCount != 1 {
- t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
- }
-}
-
-func TestAggsBucketTermsWithNumericKeys(t *testing.T) {
- s := `{
- "users" : {
- "doc_count_error_upper_bound" : 1,
- "sum_other_doc_count" : 2,
- "buckets" : [ {
- "key" : 17,
- "doc_count" : 2
- }, {
- "key" : 21,
- "doc_count" : 1
- } ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Terms("users")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
- }
- if len(agg.Buckets) != 2 {
- t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
- }
- if agg.Buckets[0].Key != float64(17) {
- t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key)
- }
- if got, err := agg.Buckets[0].KeyNumber.Int64(); err != nil {
- t.Errorf("expected to convert key to int64; got: %v", err)
- } else if got != 17 {
- t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key)
- }
- if agg.Buckets[0].DocCount != 2 {
- t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
- }
- if agg.Buckets[1].Key != float64(21) {
- t.Errorf("expected key %v; got: %v", 21, agg.Buckets[1].Key)
- }
- if got, err := agg.Buckets[1].KeyNumber.Int64(); err != nil {
- t.Errorf("expected to convert key to int64; got: %v", err)
- } else if got != 21 {
- t.Errorf("expected key %v; got: %v", 21, agg.Buckets[1].Key)
- }
- if agg.Buckets[1].DocCount != 1 {
- t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
- }
-}
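The reason the test checks both Key and KeyNumber: Key travels through interface{} and is therefore decoded as float64, which can lose precision for large integer keys, while KeyNumber keeps the original JSON token as a json.Number. A minimal sketch, assuming agg is the terms result decoded above:

// Sketch only: reading a numeric terms key without going through float64.
bucket := agg.Buckets[0]
asFloat, _ := bucket.Key.(float64) // 17, via the generic interface{} path
asInt, err := bucket.KeyNumber.Int64()
if err != nil {
	// KeyNumber was not an integer literal (e.g. a float or a string key)
	asInt = int64(asFloat)
}
fmt.Println(asFloat, asInt, bucket.KeyNumber.String())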
-
-func TestAggsBucketTermsWithBoolKeys(t *testing.T) {
- s := `{
- "users" : {
- "doc_count_error_upper_bound" : 1,
- "sum_other_doc_count" : 2,
- "buckets" : [ {
- "key" : true,
- "doc_count" : 2
- }, {
- "key" : false,
- "doc_count" : 1
- } ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Terms("users")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
- }
- if len(agg.Buckets) != 2 {
- t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
- }
- if agg.Buckets[0].Key != true {
- t.Errorf("expected key %v; got: %v", true, agg.Buckets[0].Key)
- }
- if agg.Buckets[0].DocCount != 2 {
- t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
- }
- if agg.Buckets[1].Key != false {
- t.Errorf("expected key %v; got: %v", false, agg.Buckets[1].Key)
- }
- if agg.Buckets[1].DocCount != 1 {
- t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
- }
-}
-
-func TestAggsBucketSignificantTerms(t *testing.T) {
- s := `{
- "significantCrimeTypes" : {
- "doc_count": 47347,
- "buckets" : [
- {
- "key": "Bicycle theft",
- "doc_count": 3640,
- "score": 0.371235374214817,
- "bg_count": 66799
- }
- ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.SignificantTerms("significantCrimeTypes")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.DocCount != 47347 {
-		t.Fatalf("expected aggregation DocCount = %d; got: %d", 47347, agg.DocCount)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
- }
- if len(agg.Buckets) != 1 {
- t.Errorf("expected %d bucket entries; got: %d", 1, len(agg.Buckets))
- }
- if agg.Buckets[0].Key != "Bicycle theft" {
- t.Errorf("expected key = %q; got: %q", "Bicycle theft", agg.Buckets[0].Key)
- }
- if agg.Buckets[0].DocCount != 3640 {
- t.Errorf("expected doc count = %d; got: %d", 3640, agg.Buckets[0].DocCount)
- }
- if agg.Buckets[0].Score != float64(0.371235374214817) {
- t.Errorf("expected score = %v; got: %v", float64(0.371235374214817), agg.Buckets[0].Score)
- }
- if agg.Buckets[0].BgCount != 66799 {
- t.Errorf("expected BgCount = %d; got: %d", 66799, agg.Buckets[0].BgCount)
- }
-}
-
-func TestAggsBucketSampler(t *testing.T) {
- s := `{
- "sample" : {
- "doc_count": 1000,
- "keywords": {
- "doc_count": 1000,
- "buckets" : [
- {
- "key": "bend",
- "doc_count": 58,
- "score": 37.982536582524276,
- "bg_count": 103
- }
- ]
- }
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Sampler("sample")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.DocCount != 1000 {
-		t.Fatalf("expected aggregation DocCount = %d; got: %d", 1000, agg.DocCount)
- }
- sub, found := agg.Aggregations["keywords"]
- if !found {
- t.Fatalf("expected sub aggregation %q", "keywords")
- }
- if sub == nil {
- t.Fatalf("expected sub aggregation %q; got: %v", "keywords", sub)
- }
-}
-
-func TestAggsBucketRange(t *testing.T) {
- s := `{
- "price_ranges" : {
- "buckets": [
- {
- "to": 50,
- "doc_count": 2
- },
- {
- "from": 50,
- "to": 100,
- "doc_count": 4
- },
- {
- "from": 100,
- "doc_count": 4
- }
- ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Range("price_ranges")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
- }
- if len(agg.Buckets) != 3 {
- t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
- }
- if agg.Buckets[0].From != nil {
- t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
- }
- if agg.Buckets[0].To == nil {
- t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
- }
- if *agg.Buckets[0].To != float64(50) {
- t.Errorf("expected To = %v; got: %v", float64(50), *agg.Buckets[0].To)
- }
- if agg.Buckets[0].DocCount != 2 {
- t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[0].DocCount)
- }
- if agg.Buckets[1].From == nil {
- t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
- }
- if *agg.Buckets[1].From != float64(50) {
- t.Errorf("expected From = %v; got: %v", float64(50), *agg.Buckets[1].From)
- }
- if agg.Buckets[1].To == nil {
- t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)
- }
- if *agg.Buckets[1].To != float64(100) {
- t.Errorf("expected To = %v; got: %v", float64(100), *agg.Buckets[1].To)
- }
- if agg.Buckets[1].DocCount != 4 {
- t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[1].DocCount)
- }
- if agg.Buckets[2].From == nil {
- t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)
- }
- if *agg.Buckets[2].From != float64(100) {
- t.Errorf("expected From = %v; got: %v", float64(100), *agg.Buckets[2].From)
- }
- if agg.Buckets[2].To != nil {
- t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)
- }
- if agg.Buckets[2].DocCount != 4 {
- t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[2].DocCount)
- }
-}
-
-func TestAggsBucketDateRange(t *testing.T) {
- s := `{
- "range": {
- "buckets": [
- {
- "to": 1.3437792E+12,
- "to_as_string": "08-2012",
- "doc_count": 7
- },
- {
- "from": 1.3437792E+12,
- "from_as_string": "08-2012",
- "doc_count": 2
- }
- ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.DateRange("range")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
- }
- if len(agg.Buckets) != 2 {
- t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
- }
- if agg.Buckets[0].From != nil {
- t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
- }
- if agg.Buckets[0].To == nil {
- t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
- }
- if *agg.Buckets[0].To != float64(1.3437792E+12) {
- t.Errorf("expected To = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[0].To)
- }
- if agg.Buckets[0].ToAsString != "08-2012" {
- t.Errorf("expected ToAsString = %q; got: %q", "08-2012", agg.Buckets[0].ToAsString)
- }
- if agg.Buckets[0].DocCount != 7 {
- t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[0].DocCount)
- }
- if agg.Buckets[1].From == nil {
- t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
- }
- if *agg.Buckets[1].From != float64(1.3437792E+12) {
- t.Errorf("expected From = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[1].From)
- }
- if agg.Buckets[1].FromAsString != "08-2012" {
- t.Errorf("expected FromAsString = %q; got: %q", "08-2012", agg.Buckets[1].FromAsString)
- }
- if agg.Buckets[1].To != nil {
- t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To)
- }
- if agg.Buckets[1].DocCount != 2 {
- t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[1].DocCount)
- }
-}
-
-func TestAggsBucketIPRange(t *testing.T) {
- s := `{
- "ip_ranges": {
- "buckets" : [
- {
- "to": 167772165,
- "to_as_string": "10.0.0.5",
- "doc_count": 4
- },
- {
- "from": 167772165,
- "from_as_string": "10.0.0.5",
- "doc_count": 6
- }
- ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.IPRange("ip_ranges")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
- }
- if len(agg.Buckets) != 2 {
- t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
- }
- if agg.Buckets[0].From != nil {
- t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
- }
- if agg.Buckets[0].To == nil {
- t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
- }
- if *agg.Buckets[0].To != float64(167772165) {
- t.Errorf("expected To = %v; got: %v", float64(167772165), *agg.Buckets[0].To)
- }
- if agg.Buckets[0].ToAsString != "10.0.0.5" {
- t.Errorf("expected ToAsString = %q; got: %q", "10.0.0.5", agg.Buckets[0].ToAsString)
- }
- if agg.Buckets[0].DocCount != 4 {
- t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[0].DocCount)
- }
- if agg.Buckets[1].From == nil {
- t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
- }
- if *agg.Buckets[1].From != float64(167772165) {
- t.Errorf("expected From = %v; got: %v", float64(167772165), *agg.Buckets[1].From)
- }
- if agg.Buckets[1].FromAsString != "10.0.0.5" {
- t.Errorf("expected FromAsString = %q; got: %q", "10.0.0.5", agg.Buckets[1].FromAsString)
- }
- if agg.Buckets[1].To != nil {
- t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To)
- }
- if agg.Buckets[1].DocCount != 6 {
- t.Errorf("expected DocCount = %d; got: %d", 6, agg.Buckets[1].DocCount)
- }
-}
-
-func TestAggsBucketHistogram(t *testing.T) {
- s := `{
- "prices" : {
- "buckets": [
- {
- "key": 0,
- "doc_count": 2
- },
- {
- "key": 50,
- "doc_count": 4
- },
- {
- "key": 150,
- "doc_count": 3
- }
- ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Histogram("prices")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
- }
- if len(agg.Buckets) != 3 {
- t.Errorf("expected %d buckets; got: %d", 3, len(agg.Buckets))
- }
- if agg.Buckets[0].Key != 0 {
- t.Errorf("expected key = %v; got: %v", 0, agg.Buckets[0].Key)
- }
- if agg.Buckets[0].KeyAsString != nil {
- t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[0].KeyAsString)
- }
- if agg.Buckets[0].DocCount != 2 {
- t.Errorf("expected doc count = %d; got: %d", 2, agg.Buckets[0].DocCount)
- }
- if agg.Buckets[1].Key != 50 {
- t.Errorf("expected key = %v; got: %v", 50, agg.Buckets[1].Key)
- }
- if agg.Buckets[1].KeyAsString != nil {
- t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[1].KeyAsString)
- }
- if agg.Buckets[1].DocCount != 4 {
- t.Errorf("expected doc count = %d; got: %d", 4, agg.Buckets[1].DocCount)
- }
- if agg.Buckets[2].Key != 150 {
- t.Errorf("expected key = %v; got: %v", 150, agg.Buckets[2].Key)
- }
- if agg.Buckets[2].KeyAsString != nil {
- t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[2].KeyAsString)
- }
- if agg.Buckets[2].DocCount != 3 {
- t.Errorf("expected doc count = %d; got: %d", 3, agg.Buckets[2].DocCount)
- }
-}
-
-func TestAggsBucketDateHistogram(t *testing.T) {
- s := `{
- "articles_over_time": {
- "buckets": [
- {
- "key_as_string": "2013-02-02",
- "key": 1328140800000,
- "doc_count": 1
- },
- {
- "key_as_string": "2013-03-02",
- "key": 1330646400000,
- "doc_count": 2
- }
- ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.DateHistogram("articles_over_time")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
- }
- if len(agg.Buckets) != 2 {
- t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
- }
- if agg.Buckets[0].Key != 1328140800000 {
- t.Errorf("expected key %v; got: %v", 1328140800000, agg.Buckets[0].Key)
- }
- if agg.Buckets[0].KeyAsString == nil {
- t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[0].KeyAsString)
- }
- if *agg.Buckets[0].KeyAsString != "2013-02-02" {
- t.Errorf("expected key_as_string %q; got: %q", "2013-02-02", *agg.Buckets[0].KeyAsString)
- }
- if agg.Buckets[0].DocCount != 1 {
- t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[0].DocCount)
- }
- if agg.Buckets[1].Key != 1330646400000 {
- t.Errorf("expected key %v; got: %v", 1330646400000, agg.Buckets[1].Key)
- }
- if agg.Buckets[1].KeyAsString == nil {
- t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[1].KeyAsString)
- }
- if *agg.Buckets[1].KeyAsString != "2013-03-02" {
- t.Errorf("expected key_as_string %q; got: %q", "2013-03-02", *agg.Buckets[1].KeyAsString)
- }
- if agg.Buckets[1].DocCount != 2 {
- t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[1].DocCount)
- }
-}
-
-func TestAggsMetricsGeoBounds(t *testing.T) {
- s := `{
- "viewport": {
- "bounds": {
- "top_left": {
- "lat": 80.45,
- "lon": -160.22
- },
- "bottom_right": {
- "lat": 40.65,
- "lon": 42.57
- }
- }
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.GeoBounds("viewport")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
-	if agg.Bounds.TopLeft.Latitude != float64(80.45) {
-		t.Fatalf("expected Bounds.TopLeft.Latitude = %v; got: %v", float64(80.45), agg.Bounds.TopLeft.Latitude)
-	}
-	if agg.Bounds.TopLeft.Longitude != float64(-160.22) {
-		t.Fatalf("expected Bounds.TopLeft.Longitude = %v; got: %v", float64(-160.22), agg.Bounds.TopLeft.Longitude)
-	}
-	if agg.Bounds.BottomRight.Latitude != float64(40.65) {
-		t.Fatalf("expected Bounds.BottomRight.Latitude = %v; got: %v", float64(40.65), agg.Bounds.BottomRight.Latitude)
-	}
-	if agg.Bounds.BottomRight.Longitude != float64(42.57) {
-		t.Fatalf("expected Bounds.BottomRight.Longitude = %v; got: %v", float64(42.57), agg.Bounds.BottomRight.Longitude)
-	}
-}
-
-func TestAggsBucketGeoHash(t *testing.T) {
- s := `{
- "myLarge-GrainGeoHashGrid": {
- "buckets": [
- {
- "key": "svz",
- "doc_count": 10964
- },
- {
- "key": "sv8",
- "doc_count": 3198
- }
- ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.GeoHash("myLarge-GrainGeoHashGrid")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
- }
- if len(agg.Buckets) != 2 {
- t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
- }
- if agg.Buckets[0].Key != "svz" {
- t.Errorf("expected key %q; got: %q", "svz", agg.Buckets[0].Key)
- }
- if agg.Buckets[0].DocCount != 10964 {
- t.Errorf("expected doc count %d; got: %d", 10964, agg.Buckets[0].DocCount)
- }
- if agg.Buckets[1].Key != "sv8" {
- t.Errorf("expected key %q; got: %q", "sv8", agg.Buckets[1].Key)
- }
- if agg.Buckets[1].DocCount != 3198 {
- t.Errorf("expected doc count %d; got: %d", 3198, agg.Buckets[1].DocCount)
- }
-}
-
-func TestAggsBucketGeoDistance(t *testing.T) {
- s := `{
- "rings" : {
- "buckets": [
- {
- "unit": "km",
- "to": 100.0,
- "doc_count": 3
- },
- {
- "unit": "km",
- "from": 100.0,
- "to": 300.0,
- "doc_count": 1
- },
- {
- "unit": "km",
- "from": 300.0,
- "doc_count": 7
- }
- ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.GeoDistance("rings")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Buckets == nil {
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
- }
- if len(agg.Buckets) != 3 {
- t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
- }
- if agg.Buckets[0].From != nil {
- t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
- }
- if agg.Buckets[0].To == nil {
- t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
- }
- if *agg.Buckets[0].To != float64(100.0) {
- t.Errorf("expected To = %v; got: %v", float64(100.0), *agg.Buckets[0].To)
- }
- if agg.Buckets[0].DocCount != 3 {
-		t.Errorf("expected DocCount = %d; got: %d", 3, agg.Buckets[0].DocCount)
- }
-
- if agg.Buckets[1].From == nil {
- t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
- }
- if *agg.Buckets[1].From != float64(100.0) {
- t.Errorf("expected From = %v; got: %v", float64(100.0), *agg.Buckets[1].From)
- }
- if agg.Buckets[1].To == nil {
- t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)
- }
- if *agg.Buckets[1].To != float64(300.0) {
-		t.Errorf("expected To = %v; got: %v", float64(300.0), *agg.Buckets[1].To)
- }
- if agg.Buckets[1].DocCount != 1 {
- t.Errorf("expected DocCount = %d; got: %d", 1, agg.Buckets[1].DocCount)
- }
-
- if agg.Buckets[2].From == nil {
- t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)
- }
- if *agg.Buckets[2].From != float64(300.0) {
- t.Errorf("expected From = %v; got: %v", float64(300.0), *agg.Buckets[2].From)
- }
- if agg.Buckets[2].To != nil {
- t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)
- }
- if agg.Buckets[2].DocCount != 7 {
- t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[2].DocCount)
- }
-}
-
-func TestAggsSubAggregates(t *testing.T) {
- rs := `{
- "users" : {
- "doc_count_error_upper_bound" : 1,
- "sum_other_doc_count" : 2,
- "buckets" : [ {
- "key" : "olivere",
- "doc_count" : 2,
- "ts" : {
- "buckets" : [ {
- "key_as_string" : "2012-01-01T00:00:00.000Z",
- "key" : 1325376000000,
- "doc_count" : 2
- } ]
- }
- }, {
- "key" : "sandrae",
- "doc_count" : 1,
- "ts" : {
- "buckets" : [ {
- "key_as_string" : "2011-01-01T00:00:00.000Z",
- "key" : 1293840000000,
- "doc_count" : 1
- } ]
- }
- } ]
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(rs), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- // Access top-level aggregation
- users, found := aggs.Terms("users")
- if !found {
- t.Fatalf("expected users aggregation to be found; got: %v", found)
- }
- if users == nil {
- t.Fatalf("expected users aggregation; got: %v", users)
- }
- if users.Buckets == nil {
- t.Fatalf("expected users buckets; got: %v", users.Buckets)
- }
- if len(users.Buckets) != 2 {
- t.Errorf("expected %d bucket entries; got: %d", 2, len(users.Buckets))
- }
- if users.Buckets[0].Key != "olivere" {
- t.Errorf("expected key %q; got: %q", "olivere", users.Buckets[0].Key)
- }
- if users.Buckets[0].DocCount != 2 {
- t.Errorf("expected doc count %d; got: %d", 2, users.Buckets[0].DocCount)
- }
- if users.Buckets[1].Key != "sandrae" {
- t.Errorf("expected key %q; got: %q", "sandrae", users.Buckets[1].Key)
- }
- if users.Buckets[1].DocCount != 1 {
- t.Errorf("expected doc count %d; got: %d", 1, users.Buckets[1].DocCount)
- }
-
- // Access sub-aggregation
- ts, found := users.Buckets[0].DateHistogram("ts")
- if !found {
- t.Fatalf("expected ts aggregation to be found; got: %v", found)
- }
- if ts == nil {
- t.Fatalf("expected ts aggregation; got: %v", ts)
- }
- if ts.Buckets == nil {
- t.Fatalf("expected ts buckets; got: %v", ts.Buckets)
- }
- if len(ts.Buckets) != 1 {
- t.Errorf("expected %d bucket entries; got: %d", 1, len(ts.Buckets))
- }
- if ts.Buckets[0].Key != 1325376000000 {
- t.Errorf("expected key %v; got: %v", 1325376000000, ts.Buckets[0].Key)
- }
- if ts.Buckets[0].KeyAsString == nil {
- t.Fatalf("expected key_as_string != %v; got: %v", nil, ts.Buckets[0].KeyAsString)
- }
- if *ts.Buckets[0].KeyAsString != "2012-01-01T00:00:00.000Z" {
- t.Errorf("expected key_as_string %q; got: %q", "2012-01-01T00:00:00.000Z", *ts.Buckets[0].KeyAsString)
- }
-}
-
-func TestAggsPipelineAvgBucket(t *testing.T) {
- s := `{
- "avg_monthly_sales" : {
- "value" : 328.33333333333333
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.AvgBucket("avg_monthly_sales")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(328.33333333333333) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(328.33333333333333), *agg.Value)
- }
-}
-
-func TestAggsPipelineSumBucket(t *testing.T) {
- s := `{
- "sum_monthly_sales" : {
- "value" : 985
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.SumBucket("sum_monthly_sales")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(985) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(985), *agg.Value)
- }
-}
-
-func TestAggsPipelineMaxBucket(t *testing.T) {
- s := `{
- "max_monthly_sales" : {
- "keys": ["2015/01/01 00:00:00"],
- "value" : 550
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.MaxBucket("max_monthly_sales")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if len(agg.Keys) != 1 {
- t.Fatalf("expected 1 key; got: %d", len(agg.Keys))
- }
- if got, want := agg.Keys[0], "2015/01/01 00:00:00"; got != want {
- t.Fatalf("expected key %q; got: %v (%T)", want, got, got)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(550) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value)
- }
-}
-
-func TestAggsPipelineMinBucket(t *testing.T) {
- s := `{
- "min_monthly_sales" : {
- "keys": ["2015/02/01 00:00:00"],
- "value" : 60
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.MinBucket("min_monthly_sales")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if len(agg.Keys) != 1 {
- t.Fatalf("expected 1 key; got: %d", len(agg.Keys))
- }
- if got, want := agg.Keys[0], "2015/02/01 00:00:00"; got != want {
- t.Fatalf("expected key %q; got: %v (%T)", want, got, got)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(60) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(60), *agg.Value)
- }
-}
-
-func TestAggsPipelineMovAvg(t *testing.T) {
- s := `{
- "the_movavg" : {
- "value" : 12.0
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.MovAvg("the_movavg")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(12.0) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(12.0), *agg.Value)
- }
-}
-
-func TestAggsPipelineDerivative(t *testing.T) {
- s := `{
- "sales_deriv" : {
- "value" : 315
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Derivative("sales_deriv")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(315) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(315), *agg.Value)
- }
-}
-
-func TestAggsPipelinePercentilesBucket(t *testing.T) {
- s := `{
- "sales_percentiles": {
- "values": {
- "25.0": 100,
- "50.0": 200,
- "75.0": 300
- }
- }
-}`
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.PercentilesBucket("sales_percentiles")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if len(agg.Values) != 3 {
- t.Fatalf("expected aggregation map with three entries; got: %v", agg.Values)
- }
-}
-
-func TestAggsPipelineStatsBucket(t *testing.T) {
- s := `{
- "stats_monthly_sales": {
- "count": 3,
- "min": 60.0,
- "max": 550.0,
- "avg": 328.3333333333333,
- "sum": 985.0
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.StatsBucket("stats_monthly_sales")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Count != 3 {
- t.Fatalf("expected aggregation count = %v; got: %v", 3, agg.Count)
- }
- if agg.Min == nil {
- t.Fatalf("expected aggregation min != nil; got: %v", agg.Min)
- }
- if *agg.Min != float64(60.0) {
- t.Fatalf("expected aggregation min = %v; got: %v", float64(60.0), *agg.Min)
- }
- if agg.Max == nil {
- t.Fatalf("expected aggregation max != nil; got: %v", agg.Max)
- }
- if *agg.Max != float64(550.0) {
- t.Fatalf("expected aggregation max = %v; got: %v", float64(550.0), *agg.Max)
- }
- if agg.Avg == nil {
- t.Fatalf("expected aggregation avg != nil; got: %v", agg.Avg)
- }
- if *agg.Avg != float64(328.3333333333333) {
- t.Fatalf("expected aggregation average = %v; got: %v", float64(328.3333333333333), *agg.Avg)
- }
- if agg.Sum == nil {
- t.Fatalf("expected aggregation sum != nil; got: %v", agg.Sum)
- }
- if *agg.Sum != float64(985.0) {
- t.Fatalf("expected aggregation sum = %v; got: %v", float64(985.0), *agg.Sum)
- }
-}
-
-func TestAggsPipelineCumulativeSum(t *testing.T) {
- s := `{
- "cumulative_sales" : {
- "value" : 550
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.CumulativeSum("cumulative_sales")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(550) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value)
- }
-}
-
-func TestAggsPipelineBucketScript(t *testing.T) {
- s := `{
- "t-shirt-percentage" : {
- "value" : 20
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.BucketScript("t-shirt-percentage")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(20) {
- t.Fatalf("expected aggregation value = %v; got: %v", float64(20), *agg.Value)
- }
-}
-
-func TestAggsPipelineSerialDiff(t *testing.T) {
- s := `{
- "the_diff" : {
- "value" : -722.0
- }
-}`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.SerialDiff("the_diff")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if agg.Value == nil {
- t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
- }
- if *agg.Value != float64(-722.0) {
-		t.Fatalf("expected aggregation value = %v; got: %v", float64(-722.0), *agg.Value)
- }
-}
-
-func TestAggsComposite(t *testing.T) {
- s := `{
- "the_composite" : {
- "buckets" : [
- {
- "key" : {
- "composite_users" : "olivere",
- "composite_retweets" : 0.0,
- "composite_created" : 1349856720000
- },
- "doc_count" : 1
- },
- {
- "key" : {
- "composite_users" : "olivere",
- "composite_retweets" : 108.0,
- "composite_created" : 1355333880000
- },
- "doc_count" : 1
- },
- {
- "key" : {
- "composite_users" : "sandrae",
- "composite_retweets" : 12.0,
- "composite_created" : 1321009080000
- },
- "doc_count" : 1
- }
- ]
- }
- }`
-
- aggs := new(Aggregations)
- err := json.Unmarshal([]byte(s), &aggs)
- if err != nil {
- t.Fatalf("expected no error decoding; got: %v", err)
- }
-
- agg, found := aggs.Composite("the_composite")
- if !found {
- t.Fatalf("expected aggregation to be found; got: %v", found)
- }
- if agg == nil {
- t.Fatalf("expected aggregation != nil; got: %v", agg)
- }
- if want, have := 3, len(agg.Buckets); want != have {
- t.Fatalf("expected aggregation buckets length = %v; got: %v", want, have)
- }
-
- // 1st bucket
- bucket := agg.Buckets[0]
- if want, have := int64(1), bucket.DocCount; want != have {
- t.Fatalf("expected aggregation bucket doc count = %v; got: %v", want, have)
- }
- if want, have := 3, len(bucket.Key); want != have {
- t.Fatalf("expected aggregation bucket key length = %v; got: %v", want, have)
- }
- v, found := bucket.Key["composite_users"]
- if !found {
- t.Fatalf("expected to find bucket key %q", "composite_users")
- }
- s, ok := v.(string)
- if !ok {
- t.Fatalf("expected to have bucket key of type string; got: %T", v)
- }
- if want, have := "olivere", s; want != have {
- t.Fatalf("expected to find bucket key value %q; got: %q", want, have)
- }
- v, found = bucket.Key["composite_retweets"]
- if !found {
- t.Fatalf("expected to find bucket key %q", "composite_retweets")
- }
- f, ok := v.(float64)
- if !ok {
-		t.Fatalf("expected to have bucket key of type float64; got: %T", v)
- }
- if want, have := 0.0, f; want != have {
- t.Fatalf("expected to find bucket key value %v; got: %v", want, have)
- }
- v, found = bucket.Key["composite_created"]
- if !found {
- t.Fatalf("expected to find bucket key %q", "composite_created")
- }
- f, ok = v.(float64)
- if !ok {
-		t.Fatalf("expected to have bucket key of type float64; got: %T", v)
- }
- if want, have := 1349856720000.0, f; want != have {
- t.Fatalf("expected to find bucket key value %v; got: %v", want, have)
- }
-
- // 2nd bucket
- bucket = agg.Buckets[1]
- if want, have := int64(1), bucket.DocCount; want != have {
- t.Fatalf("expected aggregation bucket doc count = %v; got: %v", want, have)
- }
- if want, have := 3, len(bucket.Key); want != have {
- t.Fatalf("expected aggregation bucket key length = %v; got: %v", want, have)
- }
- v, found = bucket.Key["composite_users"]
- if !found {
- t.Fatalf("expected to find bucket key %q", "composite_users")
- }
- s, ok = v.(string)
- if !ok {
- t.Fatalf("expected to have bucket key of type string; got: %T", v)
- }
- if want, have := "olivere", s; want != have {
- t.Fatalf("expected to find bucket key value %q; got: %q", want, have)
- }
- v, found = bucket.Key["composite_retweets"]
- if !found {
- t.Fatalf("expected to find bucket key %q", "composite_retweets")
- }
- f, ok = v.(float64)
- if !ok {
-		t.Fatalf("expected to have bucket key of type float64; got: %T", v)
- }
- if want, have := 108.0, f; want != have {
- t.Fatalf("expected to find bucket key value %v; got: %v", want, have)
- }
- v, found = bucket.Key["composite_created"]
- if !found {
- t.Fatalf("expected to find bucket key %q", "composite_created")
- }
- f, ok = v.(float64)
- if !ok {
-		t.Fatalf("expected to have bucket key of type float64; got: %T", v)
- }
- if want, have := 1355333880000.0, f; want != have {
- t.Fatalf("expected to find bucket key value %v; got: %v", want, have)
- }
-
- // 3rd bucket
- bucket = agg.Buckets[2]
- if want, have := int64(1), bucket.DocCount; want != have {
- t.Fatalf("expected aggregation bucket doc count = %v; got: %v", want, have)
- }
- if want, have := 3, len(bucket.Key); want != have {
- t.Fatalf("expected aggregation bucket key length = %v; got: %v", want, have)
- }
- v, found = bucket.Key["composite_users"]
- if !found {
- t.Fatalf("expected to find bucket key %q", "composite_users")
- }
- s, ok = v.(string)
- if !ok {
- t.Fatalf("expected to have bucket key of type string; got: %T", v)
- }
- if want, have := "sandrae", s; want != have {
- t.Fatalf("expected to find bucket key value %q; got: %q", want, have)
- }
- v, found = bucket.Key["composite_retweets"]
- if !found {
- t.Fatalf("expected to find bucket key %q", "composite_retweets")
- }
- f, ok = v.(float64)
- if !ok {
-		t.Fatalf("expected to have bucket key of type float64; got: %T", v)
- }
- if want, have := 12.0, f; want != have {
- t.Fatalf("expected to find bucket key value %v; got: %v", want, have)
- }
- v, found = bucket.Key["composite_created"]
- if !found {
- t.Fatalf("expected to find bucket key %q", "composite_created")
- }
- f, ok = v.(float64)
- if !ok {
-		t.Fatalf("expected to have bucket key of type float64; got: %T", v)
- }
- if want, have := 1321009080000.0, f; want != have {
- t.Fatalf("expected to find bucket key value %v; got: %v", want, have)
- }
-}
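The fixtures above only decode canned JSON into Aggregations; end to end, such a response usually comes from a search request carrying the aggregation. A rough sketch, assuming the NewTermsAggregation builder and a default local client (neither is shown in this diff):

// Hedged sketch: request a "users" terms aggregation and read it back with
// Aggregations.Terms, mirroring the fixtures above. NewTermsAggregation and
// the default client settings are assumptions, not part of the deleted tests.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	ctx := context.Background()
	client, err := elastic.NewClient() // assumes Elasticsearch on 127.0.0.1:9200
	if err != nil {
		log.Fatal(err)
	}
	res, err := client.Search().
		Index("tweets").
		Size(0). // aggregation only; individual hits are not needed
		Aggregation("users", elastic.NewTermsAggregation().Field("user")).
		Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	users, found := res.Aggregations.Terms("users")
	if !found {
		log.Fatal("users aggregation not found in response")
	}
	for _, bucket := range users.Buckets {
		fmt.Printf("%v: %d docs\n", bucket.Key, bucket.DocCount)
	}
}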
diff --git a/vendor/github.com/olivere/elastic/search_collapse_builder.go b/vendor/github.com/olivere/elastic/search_collapse_builder.go
deleted file mode 100644
index b3c628ba3..000000000
--- a/vendor/github.com/olivere/elastic/search_collapse_builder.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// CollapseBuilder enables field collapsing on a search request.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-collapse.html
-// for details.
-type CollapseBuilder struct {
- field string
- innerHit *InnerHit
- maxConcurrentGroupRequests *int
-}
-
-// NewCollapseBuilder creates a new CollapseBuilder.
-func NewCollapseBuilder(field string) *CollapseBuilder {
- return &CollapseBuilder{field: field}
-}
-
-// Field to collapse.
-func (b *CollapseBuilder) Field(field string) *CollapseBuilder {
- b.field = field
- return b
-}
-
-// InnerHit option to expand the collapsed results.
-func (b *CollapseBuilder) InnerHit(innerHit *InnerHit) *CollapseBuilder {
- b.innerHit = innerHit
- return b
-}
-
-// MaxConcurrentGroupRequests is the maximum number of group requests that are
-// allowed to be run concurrently in the inner_hits phase.
-func (b *CollapseBuilder) MaxConcurrentGroupRequests(max int) *CollapseBuilder {
- b.maxConcurrentGroupRequests = &max
- return b
-}
-
-// Source generates the JSON serializable fragment for the CollapseBuilder.
-func (b *CollapseBuilder) Source() (interface{}, error) {
- // {
- // "field": "user",
- // "inner_hits": {
- // "name": "last_tweets",
- // "size": 5,
- // "sort": [{ "date": "asc" }]
- // },
- // "max_concurrent_group_searches": 4
- // }
- src := map[string]interface{}{
- "field": b.field,
- }
-
- if b.innerHit != nil {
- hits, err := b.innerHit.Source()
- if err != nil {
- return nil, err
- }
- src["inner_hits"] = hits
- }
-
- if b.maxConcurrentGroupRequests != nil {
- src["max_concurrent_group_searches"] = *b.maxConcurrentGroupRequests
- }
-
- return src, nil
-}
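In practice the builder above is handed to a search request together with an optional InnerHit. A hedged sketch, assuming the search service exposes a Collapse(*CollapseBuilder) option and a NewMatchAllQuery helper (neither appears in this diff), with the same imports and client setup as the aggregation sketch earlier:

// Sketch under assumptions: Collapse on the search service and
// NewMatchAllQuery come from elsewhere in the package.
func searchCollapsedByUser(ctx context.Context, client *elastic.Client) error {
	collapse := elastic.NewCollapseBuilder("user").
		InnerHit(elastic.NewInnerHit().Name("last_tweets").Size(5).Sort("date", true)).
		MaxConcurrentGroupRequests(4)
	res, err := client.Search().
		Index("tweets").
		Query(elastic.NewMatchAllQuery()).
		Collapse(collapse). // assumed option; collapses hits to one per user
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("returned %d collapsed hits\n", len(res.Hits.Hits))
	return nil
}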
diff --git a/vendor/github.com/olivere/elastic/search_collapse_builder_test.go b/vendor/github.com/olivere/elastic/search_collapse_builder_test.go
deleted file mode 100644
index 0b74fadab..000000000
--- a/vendor/github.com/olivere/elastic/search_collapse_builder_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestCollapseBuilderSource(t *testing.T) {
- b := NewCollapseBuilder("user").
- InnerHit(NewInnerHit().Name("last_tweets").Size(5).Sort("date", true)).
- MaxConcurrentGroupRequests(4)
- src, err := b.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"field":"user","inner_hits":{"name":"last_tweets","size":5,"sort":[{"date":{"order":"asc"}}]},"max_concurrent_group_searches":4}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_bool.go b/vendor/github.com/olivere/elastic/search_queries_bool.go
deleted file mode 100644
index a1ff17596..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_bool.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "fmt"
-
-// A bool query matches documents matching boolean
-// combinations of other queries.
-// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-bool-query.html
-type BoolQuery struct {
- Query
- mustClauses []Query
- mustNotClauses []Query
- filterClauses []Query
- shouldClauses []Query
- boost *float64
- minimumShouldMatch string
- adjustPureNegative *bool
- queryName string
-}
-
-// NewBoolQuery creates a new bool query.
-func NewBoolQuery() *BoolQuery {
- return &BoolQuery{
- mustClauses: make([]Query, 0),
- mustNotClauses: make([]Query, 0),
- filterClauses: make([]Query, 0),
- shouldClauses: make([]Query, 0),
- }
-}
-
-func (q *BoolQuery) Must(queries ...Query) *BoolQuery {
- q.mustClauses = append(q.mustClauses, queries...)
- return q
-}
-
-func (q *BoolQuery) MustNot(queries ...Query) *BoolQuery {
- q.mustNotClauses = append(q.mustNotClauses, queries...)
- return q
-}
-
-func (q *BoolQuery) Filter(filters ...Query) *BoolQuery {
- q.filterClauses = append(q.filterClauses, filters...)
- return q
-}
-
-func (q *BoolQuery) Should(queries ...Query) *BoolQuery {
- q.shouldClauses = append(q.shouldClauses, queries...)
- return q
-}
-
-func (q *BoolQuery) Boost(boost float64) *BoolQuery {
- q.boost = &boost
- return q
-}
-
-func (q *BoolQuery) MinimumShouldMatch(minimumShouldMatch string) *BoolQuery {
- q.minimumShouldMatch = minimumShouldMatch
- return q
-}
-
-func (q *BoolQuery) MinimumNumberShouldMatch(minimumNumberShouldMatch int) *BoolQuery {
- q.minimumShouldMatch = fmt.Sprintf("%d", minimumNumberShouldMatch)
- return q
-}
-
-func (q *BoolQuery) AdjustPureNegative(adjustPureNegative bool) *BoolQuery {
- q.adjustPureNegative = &adjustPureNegative
- return q
-}
-
-func (q *BoolQuery) QueryName(queryName string) *BoolQuery {
- q.queryName = queryName
- return q
-}
-
-// Creates the query source for the bool query.
-func (q *BoolQuery) Source() (interface{}, error) {
- // {
- // "bool" : {
- // "must" : {
- // "term" : { "user" : "kimchy" }
- // },
- // "must_not" : {
- // "range" : {
- // "age" : { "from" : 10, "to" : 20 }
- // }
- // },
- // "filter" : [
- // ...
- // ]
- // "should" : [
- // {
- // "term" : { "tag" : "wow" }
- // },
- // {
- // "term" : { "tag" : "elasticsearch" }
- // }
- // ],
- // "minimum_should_match" : 1,
- // "boost" : 1.0
- // }
- // }
-
- query := make(map[string]interface{})
-
- boolClause := make(map[string]interface{})
- query["bool"] = boolClause
-
- // must
- if len(q.mustClauses) == 1 {
- src, err := q.mustClauses[0].Source()
- if err != nil {
- return nil, err
- }
- boolClause["must"] = src
- } else if len(q.mustClauses) > 1 {
- var clauses []interface{}
- for _, subQuery := range q.mustClauses {
- src, err := subQuery.Source()
- if err != nil {
- return nil, err
- }
- clauses = append(clauses, src)
- }
- boolClause["must"] = clauses
- }
-
- // must_not
- if len(q.mustNotClauses) == 1 {
- src, err := q.mustNotClauses[0].Source()
- if err != nil {
- return nil, err
- }
- boolClause["must_not"] = src
- } else if len(q.mustNotClauses) > 1 {
- var clauses []interface{}
- for _, subQuery := range q.mustNotClauses {
- src, err := subQuery.Source()
- if err != nil {
- return nil, err
- }
- clauses = append(clauses, src)
- }
- boolClause["must_not"] = clauses
- }
-
- // filter
- if len(q.filterClauses) == 1 {
- src, err := q.filterClauses[0].Source()
- if err != nil {
- return nil, err
- }
- boolClause["filter"] = src
- } else if len(q.filterClauses) > 1 {
- var clauses []interface{}
- for _, subQuery := range q.filterClauses {
- src, err := subQuery.Source()
- if err != nil {
- return nil, err
- }
- clauses = append(clauses, src)
- }
- boolClause["filter"] = clauses
- }
-
- // should
- if len(q.shouldClauses) == 1 {
- src, err := q.shouldClauses[0].Source()
- if err != nil {
- return nil, err
- }
- boolClause["should"] = src
- } else if len(q.shouldClauses) > 1 {
- var clauses []interface{}
- for _, subQuery := range q.shouldClauses {
- src, err := subQuery.Source()
- if err != nil {
- return nil, err
- }
- clauses = append(clauses, src)
- }
- boolClause["should"] = clauses
- }
-
- if q.boost != nil {
- boolClause["boost"] = *q.boost
- }
- if q.minimumShouldMatch != "" {
- boolClause["minimum_should_match"] = q.minimumShouldMatch
- }
- if q.adjustPureNegative != nil {
- boolClause["adjust_pure_negative"] = *q.adjustPureNegative
- }
- if q.queryName != "" {
- boolClause["_name"] = q.queryName
- }
-
- return query, nil
-}
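A short usage sketch for the builder above, combining scoring (must/should) and non-scoring (filter) clauses in one query; the Gte helper on NewRangeQuery is assumed from the rest of the package, while the other calls appear in the tests in this diff:

// Sketch: must + filter + should clauses executed as a single bool query.
func searchPopularGolangTweets(ctx context.Context, client *elastic.Client) error {
	q := elastic.NewBoolQuery().
		Must(elastic.NewTermQuery("tag", "golang")).
		Filter(elastic.NewRangeQuery("retweets").Gte(10)). // filter context: not scored
		Should(elastic.NewTermQuery("user", "olivere"), elastic.NewTermQuery("user", "sandrae")).
		MinimumShouldMatch("1")
	res, err := client.Search().Index("tweets").Query(q).Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d matching tweets\n", res.Hits.TotalHits)
	return nil
}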
diff --git a/vendor/github.com/olivere/elastic/search_queries_bool_test.go b/vendor/github.com/olivere/elastic/search_queries_bool_test.go
deleted file mode 100644
index cdcc38de1..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_bool_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestBoolQuery(t *testing.T) {
- q := NewBoolQuery()
- q = q.Must(NewTermQuery("tag", "wow"))
- q = q.MustNot(NewRangeQuery("age").From(10).To(20))
- q = q.Filter(NewTermQuery("account", "1"))
- q = q.Should(NewTermQuery("tag", "sometag"), NewTermQuery("tag", "sometagtag"))
- q = q.Boost(10)
- q = q.QueryName("Test")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"bool":{"_name":"Test","boost":10,"filter":{"term":{"account":"1"}},"must":{"term":{"tag":"wow"}},"must_not":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"should":[{"term":{"tag":"sometag"}},{"term":{"tag":"sometagtag"}}]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_boosting.go b/vendor/github.com/olivere/elastic/search_queries_boosting.go
deleted file mode 100644
index 0060a30a8..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_boosting.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// A boosting query can be used to effectively
-// demote results that match a given query.
-// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-boosting-query.html
-type BoostingQuery struct {
- Query
- positiveClause Query
- negativeClause Query
- negativeBoost *float64
- boost *float64
-}
-
-// NewBoostingQuery creates a new boosting query.
-func NewBoostingQuery() *BoostingQuery {
- return &BoostingQuery{}
-}
-
-func (q *BoostingQuery) Positive(positive Query) *BoostingQuery {
- q.positiveClause = positive
- return q
-}
-
-func (q *BoostingQuery) Negative(negative Query) *BoostingQuery {
- q.negativeClause = negative
- return q
-}
-
-func (q *BoostingQuery) NegativeBoost(negativeBoost float64) *BoostingQuery {
- q.negativeBoost = &negativeBoost
- return q
-}
-
-func (q *BoostingQuery) Boost(boost float64) *BoostingQuery {
- q.boost = &boost
- return q
-}
-
-// Creates the query source for the boosting query.
-func (q *BoostingQuery) Source() (interface{}, error) {
- // {
- // "boosting" : {
- // "positive" : {
- // "term" : {
- // "field1" : "value1"
- // }
- // },
- // "negative" : {
- // "term" : {
- // "field2" : "value2"
- // }
- // },
- // "negative_boost" : 0.2
- // }
- // }
-
- query := make(map[string]interface{})
-
- boostingClause := make(map[string]interface{})
- query["boosting"] = boostingClause
-
- // Negative and positive clause as well as negative boost
- // are mandatory in the Java client.
-
- // positive
- if q.positiveClause != nil {
- src, err := q.positiveClause.Source()
- if err != nil {
- return nil, err
- }
- boostingClause["positive"] = src
- }
-
- // negative
- if q.negativeClause != nil {
- src, err := q.negativeClause.Source()
- if err != nil {
- return nil, err
- }
- boostingClause["negative"] = src
- }
-
- if q.negativeBoost != nil {
- boostingClause["negative_boost"] = *q.negativeBoost
- }
-
- if q.boost != nil {
- boostingClause["boost"] = *q.boost
- }
-
- return query, nil
-}
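The point of the builder above is that negative matches are demoted rather than dropped; a brief sketch (same imports and client setup as the earlier examples):

// Sketch: documents tagged "spam" stay in the result set but keep only
// 20% of their score via negative_boost.
func searchWithSpamDemoted(ctx context.Context, client *elastic.Client) error {
	q := elastic.NewBoostingQuery().
		Positive(elastic.NewTermQuery("message", "elasticsearch")).
		Negative(elastic.NewTermQuery("tag", "spam")).
		NegativeBoost(0.2)
	res, err := client.Search().Index("tweets").Query(q).Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d hits\n", res.Hits.TotalHits)
	return nil
}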
diff --git a/vendor/github.com/olivere/elastic/search_queries_boosting_test.go b/vendor/github.com/olivere/elastic/search_queries_boosting_test.go
deleted file mode 100644
index 6c7f263f4..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_boosting_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestBoostingQuery(t *testing.T) {
- q := NewBoostingQuery()
- q = q.Positive(NewTermQuery("tag", "wow"))
- q = q.Negative(NewRangeQuery("age").From(10).To(20))
- q = q.NegativeBoost(0.2)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"boosting":{"negative":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"negative_boost":0.2,"positive":{"term":{"tag":"wow"}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_common_terms.go b/vendor/github.com/olivere/elastic/search_queries_common_terms.go
deleted file mode 100644
index 93a03de54..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_common_terms.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// CommonTermsQuery is a modern alternative to stopwords
-// which improves the precision and recall of search results
-// (by taking stopwords into account), without sacrificing performance.
-// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-common-terms-query.html
-type CommonTermsQuery struct {
- Query
- name string
- text interface{}
- cutoffFreq *float64
- highFreq *float64
- highFreqOp string
- highFreqMinimumShouldMatch string
- lowFreq *float64
- lowFreqOp string
- lowFreqMinimumShouldMatch string
- analyzer string
- boost *float64
- queryName string
-}
-
-// NewCommonTermsQuery creates and initializes a new common terms query.
-func NewCommonTermsQuery(name string, text interface{}) *CommonTermsQuery {
- return &CommonTermsQuery{name: name, text: text}
-}
-
-func (q *CommonTermsQuery) CutoffFrequency(f float64) *CommonTermsQuery {
- q.cutoffFreq = &f
- return q
-}
-
-func (q *CommonTermsQuery) HighFreq(f float64) *CommonTermsQuery {
- q.highFreq = &f
- return q
-}
-
-func (q *CommonTermsQuery) HighFreqOperator(op string) *CommonTermsQuery {
- q.highFreqOp = op
- return q
-}
-
-func (q *CommonTermsQuery) HighFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery {
- q.highFreqMinimumShouldMatch = minShouldMatch
- return q
-}
-
-func (q *CommonTermsQuery) LowFreq(f float64) *CommonTermsQuery {
- q.lowFreq = &f
- return q
-}
-
-func (q *CommonTermsQuery) LowFreqOperator(op string) *CommonTermsQuery {
- q.lowFreqOp = op
- return q
-}
-
-func (q *CommonTermsQuery) LowFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery {
- q.lowFreqMinimumShouldMatch = minShouldMatch
- return q
-}
-
-func (q *CommonTermsQuery) Analyzer(analyzer string) *CommonTermsQuery {
- q.analyzer = analyzer
- return q
-}
-
-func (q *CommonTermsQuery) Boost(boost float64) *CommonTermsQuery {
- q.boost = &boost
- return q
-}
-
-func (q *CommonTermsQuery) QueryName(queryName string) *CommonTermsQuery {
- q.queryName = queryName
- return q
-}
-
-// Creates the query source for the common query.
-func (q *CommonTermsQuery) Source() (interface{}, error) {
- // {
- // "common": {
- // "body": {
- // "query": "this is bonsai cool",
- // "cutoff_frequency": 0.001
- // }
- // }
- // }
- source := make(map[string]interface{})
- body := make(map[string]interface{})
- query := make(map[string]interface{})
-
- source["common"] = body
- body[q.name] = query
- query["query"] = q.text
-
- if q.cutoffFreq != nil {
- query["cutoff_frequency"] = *q.cutoffFreq
- }
- if q.highFreq != nil {
- query["high_freq"] = *q.highFreq
- }
- if q.highFreqOp != "" {
- query["high_freq_operator"] = q.highFreqOp
- }
- if q.lowFreq != nil {
- query["low_freq"] = *q.lowFreq
- }
- if q.lowFreqOp != "" {
- query["low_freq_operator"] = q.lowFreqOp
- }
- if q.lowFreqMinimumShouldMatch != "" || q.highFreqMinimumShouldMatch != "" {
- mm := make(map[string]interface{})
- if q.lowFreqMinimumShouldMatch != "" {
- mm["low_freq"] = q.lowFreqMinimumShouldMatch
- }
- if q.highFreqMinimumShouldMatch != "" {
- mm["high_freq"] = q.highFreqMinimumShouldMatch
- }
- query["minimum_should_match"] = mm
- }
- if q.analyzer != "" {
- query["analyzer"] = q.analyzer
- }
- if q.boost != nil {
- query["boost"] = *q.boost
- }
- if q.queryName != "" {
- query["_name"] = q.queryName
- }
-
- return source, nil
-}
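The minimum_should_match handling in Source above only becomes visible when both frequency groups are set; a small sketch (encoding/json as in the tests that follow):

// Sketch: low- and high-frequency terms each get their own minimum_should_match.
func commonTermsSource() (string, error) {
	q := elastic.NewCommonTermsQuery("message", "the quick brown fox").
		CutoffFrequency(0.001).
		LowFreqMinimumShouldMatch("2").
		HighFreqMinimumShouldMatch("3")
	src, err := q.Source()
	if err != nil {
		return "", err
	}
	// roughly: {"common":{"message":{...,"minimum_should_match":{"high_freq":"3","low_freq":"2"},...}}}
	data, err := json.Marshal(src)
	return string(data), err
}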
diff --git a/vendor/github.com/olivere/elastic/search_queries_common_terms_test.go b/vendor/github.com/olivere/elastic/search_queries_common_terms_test.go
deleted file mode 100644
index e841e7731..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_common_terms_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- _ "net/http"
- "testing"
-)
-
-func TestCommonTermsQuery(t *testing.T) {
- q := NewCommonTermsQuery("message", "Golang").CutoffFrequency(0.001)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"common":{"message":{"cutoff_frequency":0.001,"query":"Golang"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchQueriesCommonTermsQuery(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Common terms query
- q := NewCommonTermsQuery("message", "Golang")
- searchResult, err := client.Search().Index(testIndexName).Query(q).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 1 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
- }
- if len(searchResult.Hits.Hits) != 1 {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
- }
-
- for _, hit := range searchResult.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_constant_score.go b/vendor/github.com/olivere/elastic/search_queries_constant_score.go
deleted file mode 100644
index 285d91817..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_constant_score.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// ConstantScoreQuery is a query that wraps a filter and simply returns
-// a constant score equal to the query boost for every document in the filter.
-//
-// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-constant-score-query.html
-type ConstantScoreQuery struct {
- filter Query
- boost *float64
-}
-
-// NewConstantScoreQuery creates and initializes a new constant score query.
-func NewConstantScoreQuery(filter Query) *ConstantScoreQuery {
- return &ConstantScoreQuery{
- filter: filter,
- }
-}
-
-// Boost sets the boost for this query. Documents matching this query
-// will (in addition to the normal weightings) have their score multiplied
-// by the boost provided.
-func (q *ConstantScoreQuery) Boost(boost float64) *ConstantScoreQuery {
- q.boost = &boost
- return q
-}
-
-// Source returns the query source.
-func (q *ConstantScoreQuery) Source() (interface{}, error) {
- // "constant_score" : {
- // "filter" : {
- // ....
- // },
- // "boost" : 1.5
- // }
-
- query := make(map[string]interface{})
-
- params := make(map[string]interface{})
- query["constant_score"] = params
-
- // filter
- src, err := q.filter.Source()
- if err != nil {
- return nil, err
- }
- params["filter"] = src
-
- // boost
- if q.boost != nil {
- params["boost"] = *q.boost
- }
-
- return query, nil
-}
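A brief sketch of the builder above attached to a search, so every filtered match comes back with the same boosted score (same client setup as the earlier examples):

// Sketch: every document matching the wrapped term filter scores 1.2.
func searchConstantScore(ctx context.Context, client *elastic.Client) error {
	q := elastic.NewConstantScoreQuery(elastic.NewTermQuery("user", "kimchy")).Boost(1.2)
	res, err := client.Search().Index("tweets").Query(q).Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d hits, all scored equally\n", res.Hits.TotalHits)
	return nil
}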
diff --git a/vendor/github.com/olivere/elastic/search_queries_constant_score_test.go b/vendor/github.com/olivere/elastic/search_queries_constant_score_test.go
deleted file mode 100644
index 6508a91fb..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_constant_score_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestConstantScoreQuery(t *testing.T) {
- q := NewConstantScoreQuery(NewTermQuery("user", "kimchy")).Boost(1.2)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"constant_score":{"boost":1.2,"filter":{"term":{"user":"kimchy"}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_dis_max.go b/vendor/github.com/olivere/elastic/search_queries_dis_max.go
deleted file mode 100644
index 7a4f53a97..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_dis_max.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// DisMaxQuery is a query that generates the union of documents produced by
-// its subqueries, and that scores each document with the maximum score
-// for that document as produced by any subquery, plus a tie breaking
-// increment for any additional matching subqueries.
-//
-// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-dis-max-query.html
-type DisMaxQuery struct {
- queries []Query
- boost *float64
- tieBreaker *float64
- queryName string
-}
-
-// NewDisMaxQuery creates and initializes a new dis max query.
-func NewDisMaxQuery() *DisMaxQuery {
- return &DisMaxQuery{
- queries: make([]Query, 0),
- }
-}
-
-// Query adds one or more queries to the dis max query.
-func (q *DisMaxQuery) Query(queries ...Query) *DisMaxQuery {
- q.queries = append(q.queries, queries...)
- return q
-}
-
-// Boost sets the boost for this query. Documents matching this query will
-// (in addition to the normal weightings) have their score multiplied by
-// the boost provided.
-func (q *DisMaxQuery) Boost(boost float64) *DisMaxQuery {
- q.boost = &boost
- return q
-}
-
-// TieBreaker is the factor by which the score of each non-maximum disjunct
-// for a document is multiplied with and added into the final score.
-//
-// If non-zero, the value should be small, on the order of 0.1, which says
-// that 10 occurrences of a word in a lower-scored field that is also in a
-// higher-scored field is just as good as a unique word in the lower-scored
-// field (i.e., one that is not in any higher-scored field).
-func (q *DisMaxQuery) TieBreaker(tieBreaker float64) *DisMaxQuery {
- q.tieBreaker = &tieBreaker
- return q
-}
-
-// QueryName sets the query name for the filter that can be used
-// when searching for matched filters per hit.
-func (q *DisMaxQuery) QueryName(queryName string) *DisMaxQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns the JSON serializable content for this query.
-func (q *DisMaxQuery) Source() (interface{}, error) {
- // {
- // "dis_max" : {
- // "tie_breaker" : 0.7,
- // "boost" : 1.2,
-	//     "queries" : [
- // {
- // "term" : { "age" : 34 }
- // },
- // {
- // "term" : { "age" : 35 }
- // }
- // ]
- // }
- // }
-
- query := make(map[string]interface{})
- params := make(map[string]interface{})
- query["dis_max"] = params
-
- if q.tieBreaker != nil {
- params["tie_breaker"] = *q.tieBreaker
- }
- if q.boost != nil {
- params["boost"] = *q.boost
- }
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
-
- // queries
- var clauses []interface{}
- for _, subQuery := range q.queries {
- src, err := subQuery.Source()
- if err != nil {
- return nil, err
- }
- clauses = append(clauses, src)
- }
- params["queries"] = clauses
-
- return query, nil
-}
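A hedged usage sketch for the dis_max query deleted above: each document is scored by its best-matching sub-query, with non-maximum matches contributing through the tie breaker. The "posts" index, the field names, and the match queries are illustrative assumptions.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// Assumption: a local node and a "posts" index with "title" and "body" fields.
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Score each document by its best-matching field; non-maximum matches
	// contribute 30% of their score via the tie breaker.
	q := elastic.NewDisMaxQuery().
		Query(
			elastic.NewMatchQuery("title", "quick brown fox"),
			elastic.NewMatchQuery("body", "quick brown fox"),
		).
		TieBreaker(0.3)

	res, err := client.Search().Index("posts").Query(q).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("dis_max matched %d documents\n", res.Hits.TotalHits)
}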
diff --git a/vendor/github.com/olivere/elastic/search_queries_dis_max_test.go b/vendor/github.com/olivere/elastic/search_queries_dis_max_test.go
deleted file mode 100644
index 76ddfb079..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_dis_max_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestDisMaxQuery(t *testing.T) {
- q := NewDisMaxQuery()
- q = q.Query(NewTermQuery("age", 34), NewTermQuery("age", 35)).Boost(1.2).TieBreaker(0.7)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"dis_max":{"boost":1.2,"queries":[{"term":{"age":34}},{"term":{"age":35}}],"tie_breaker":0.7}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_exists.go b/vendor/github.com/olivere/elastic/search_queries_exists.go
deleted file mode 100644
index ac7378bad..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_exists.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// ExistsQuery is a query that matches only documents in which the given
-// field has a value.
-//
-// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-exists-query.html
-type ExistsQuery struct {
- name string
- queryName string
-}
-
-// NewExistsQuery creates and initializes a new exists query.
-func NewExistsQuery(name string) *ExistsQuery {
- return &ExistsQuery{
- name: name,
- }
-}
-
-// QueryName sets the query name for the filter that can be used
-// when searching for matched queries per hit.
-func (q *ExistsQuery) QueryName(queryName string) *ExistsQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns the JSON serializable content for this query.
-func (q *ExistsQuery) Source() (interface{}, error) {
- // {
- // "exists" : {
- // "field" : "user"
- // }
- // }
-
- query := make(map[string]interface{})
- params := make(map[string]interface{})
- query["exists"] = params
-
- params["field"] = q.name
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
-
- return query, nil
-}
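A short sketch, under the same assumptions as the examples above (local node, "twitter" index), of how the exists query deleted above might be issued against a client.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// Assumption: a local node and a "twitter" index.
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Match only documents in which the "user" field has a value.
	q := elastic.NewExistsQuery("user")

	res, err := client.Search().Index("twitter").Query(q).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d documents have a \"user\" value\n", res.Hits.TotalHits)
}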
diff --git a/vendor/github.com/olivere/elastic/search_queries_exists_test.go b/vendor/github.com/olivere/elastic/search_queries_exists_test.go
deleted file mode 100644
index f2d047087..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_exists_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestExistsQuery(t *testing.T) {
- q := NewExistsQuery("user")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"exists":{"field":"user"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_fsq.go b/vendor/github.com/olivere/elastic/search_queries_fsq.go
deleted file mode 100644
index 4cabd9bd9..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_fsq.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// FunctionScoreQuery allows you to modify the score of documents that
-// are retrieved by a query. This can be useful if, for example,
-// a score function is computationally expensive and it is sufficient
-// to compute the score on a filtered set of documents.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html
-type FunctionScoreQuery struct {
- query Query
- filter Query
- boost *float64
- maxBoost *float64
- scoreMode string
- boostMode string
- filters []Query
- scoreFuncs []ScoreFunction
- minScore *float64
- weight *float64
-}
-
-// NewFunctionScoreQuery creates and initializes a new function score query.
-func NewFunctionScoreQuery() *FunctionScoreQuery {
- return &FunctionScoreQuery{
- filters: make([]Query, 0),
- scoreFuncs: make([]ScoreFunction, 0),
- }
-}
-
-// Query sets the query for the function score query.
-func (q *FunctionScoreQuery) Query(query Query) *FunctionScoreQuery {
- q.query = query
- return q
-}
-
-// Filter sets the filter for the function score query.
-func (q *FunctionScoreQuery) Filter(filter Query) *FunctionScoreQuery {
- q.filter = filter
- return q
-}
-
-// Add adds a score function that will execute on all the documents
-// matching the filter.
-func (q *FunctionScoreQuery) Add(filter Query, scoreFunc ScoreFunction) *FunctionScoreQuery {
- q.filters = append(q.filters, filter)
- q.scoreFuncs = append(q.scoreFuncs, scoreFunc)
- return q
-}
-
-// AddScoreFunc adds a score function that will execute on all documents.
-func (q *FunctionScoreQuery) AddScoreFunc(scoreFunc ScoreFunction) *FunctionScoreQuery {
- q.filters = append(q.filters, nil)
- q.scoreFuncs = append(q.scoreFuncs, scoreFunc)
- return q
-}
-
-// ScoreMode defines how results of individual score functions will be aggregated.
-// Can be first, avg, max, sum, min, or multiply.
-func (q *FunctionScoreQuery) ScoreMode(scoreMode string) *FunctionScoreQuery {
- q.scoreMode = scoreMode
- return q
-}
-
-// BoostMode defines how the combined result of score functions will
-// influence the final score together with the sub query score.
-func (q *FunctionScoreQuery) BoostMode(boostMode string) *FunctionScoreQuery {
- q.boostMode = boostMode
- return q
-}
-
-// MaxBoost is the maximum boost that will be applied by function score.
-func (q *FunctionScoreQuery) MaxBoost(maxBoost float64) *FunctionScoreQuery {
- q.maxBoost = &maxBoost
- return q
-}
-
-// Boost sets the boost for this query. Documents matching this query will
-// (in addition to the normal weightings) have their score multiplied by the
-// boost provided.
-func (q *FunctionScoreQuery) Boost(boost float64) *FunctionScoreQuery {
- q.boost = &boost
- return q
-}
-
-// MinScore sets the minimum score.
-func (q *FunctionScoreQuery) MinScore(minScore float64) *FunctionScoreQuery {
- q.minScore = &minScore
- return q
-}
-
-// Source returns JSON for the function score query.
-func (q *FunctionScoreQuery) Source() (interface{}, error) {
- source := make(map[string]interface{})
- query := make(map[string]interface{})
- source["function_score"] = query
-
- if q.query != nil {
- src, err := q.query.Source()
- if err != nil {
- return nil, err
- }
- query["query"] = src
- }
- if q.filter != nil {
- src, err := q.filter.Source()
- if err != nil {
- return nil, err
- }
- query["filter"] = src
- }
-
- if len(q.filters) == 1 && q.filters[0] == nil {
- // Weight needs to be serialized on this level.
- if weight := q.scoreFuncs[0].GetWeight(); weight != nil {
- query["weight"] = weight
- }
- // Serialize the score function
- src, err := q.scoreFuncs[0].Source()
- if err != nil {
- return nil, err
- }
- query[q.scoreFuncs[0].Name()] = src
- } else {
- funcs := make([]interface{}, len(q.filters))
- for i, filter := range q.filters {
- hsh := make(map[string]interface{})
- if filter != nil {
- src, err := filter.Source()
- if err != nil {
- return nil, err
- }
- hsh["filter"] = src
- }
- // Weight needs to be serialized on this level.
- if weight := q.scoreFuncs[i].GetWeight(); weight != nil {
- hsh["weight"] = weight
- }
- // Serialize the score function
- src, err := q.scoreFuncs[i].Source()
- if err != nil {
- return nil, err
- }
- hsh[q.scoreFuncs[i].Name()] = src
- funcs[i] = hsh
- }
- query["functions"] = funcs
- }
-
- if q.scoreMode != "" {
- query["score_mode"] = q.scoreMode
- }
- if q.boostMode != "" {
- query["boost_mode"] = q.boostMode
- }
- if q.maxBoost != nil {
- query["max_boost"] = *q.maxBoost
- }
- if q.boost != nil {
- query["boost"] = *q.boost
- }
- if q.minScore != nil {
- query["min_score"] = *q.minScore
- }
-
- return source, nil
-}
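A minimal sketch of how the function score query deleted above might be combined with a field value factor function. The "posts" index, the "title" and "likes" fields, and the chosen modifier are assumptions for illustration only.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// Assumption: a local node and a "posts" index with a numeric "likes" field.
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Boost popular posts: add log1p(1.5 * likes) to the text score,
	// capping the function contribution at 10.
	q := elastic.NewFunctionScoreQuery().
		Query(elastic.NewMatchQuery("title", "golang")).
		AddScoreFunc(elastic.NewFieldValueFactorFunction().
			Field("likes").Factor(1.5).Modifier("log1p").Missing(0)).
		BoostMode("sum").
		MaxBoost(10)

	res, err := client.Search().Index("posts").Query(q).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("function_score matched %d documents\n", res.Hits.TotalHits)
}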
diff --git a/vendor/github.com/olivere/elastic/search_queries_fsq_score_funcs.go b/vendor/github.com/olivere/elastic/search_queries_fsq_score_funcs.go
deleted file mode 100644
index 84cc52de9..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_fsq_score_funcs.go
+++ /dev/null
@@ -1,567 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "strings"
-)
-
-// ScoreFunction is used in combination with the Function Score Query.
-type ScoreFunction interface {
- Name() string
- GetWeight() *float64 // returns the weight which must be serialized at the level of FunctionScoreQuery
- Source() (interface{}, error)
-}
-
-// -- Exponential Decay --
-
-// ExponentialDecayFunction builds an exponential decay score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html
-// for details.
-type ExponentialDecayFunction struct {
- fieldName string
- origin interface{}
- scale interface{}
- decay *float64
- offset interface{}
- multiValueMode string
- weight *float64
-}
-
-// NewExponentialDecayFunction creates a new ExponentialDecayFunction.
-func NewExponentialDecayFunction() *ExponentialDecayFunction {
- return &ExponentialDecayFunction{}
-}
-
-// Name represents the JSON field name under which the output of Source
-// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
-func (fn *ExponentialDecayFunction) Name() string {
- return "exp"
-}
-
-// FieldName specifies the name of the field to which this decay function is applied.
-func (fn *ExponentialDecayFunction) FieldName(fieldName string) *ExponentialDecayFunction {
- fn.fieldName = fieldName
- return fn
-}
-
-// Origin defines the "central point" by which the decay function calculates
-// "distance".
-func (fn *ExponentialDecayFunction) Origin(origin interface{}) *ExponentialDecayFunction {
- fn.origin = origin
- return fn
-}
-
-// Scale defines the scale to be used with Decay.
-func (fn *ExponentialDecayFunction) Scale(scale interface{}) *ExponentialDecayFunction {
- fn.scale = scale
- return fn
-}
-
-// Decay defines how documents are scored at the distance given a Scale.
-// If no decay is defined, documents at the distance Scale will be scored 0.5.
-func (fn *ExponentialDecayFunction) Decay(decay float64) *ExponentialDecayFunction {
- fn.decay = &decay
- return fn
-}
-
-// Offset, if defined, computes the decay function only for a distance
-// greater than the defined offset.
-func (fn *ExponentialDecayFunction) Offset(offset interface{}) *ExponentialDecayFunction {
- fn.offset = offset
- return fn
-}
-
-// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
-// for details.
-func (fn *ExponentialDecayFunction) Weight(weight float64) *ExponentialDecayFunction {
- fn.weight = &weight
- return fn
-}
-
-// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
-// Returns nil if weight is not specified.
-func (fn *ExponentialDecayFunction) GetWeight() *float64 {
- return fn.weight
-}
-
-// MultiValueMode specifies how the decay function should be calculated
-// on a field that has multiple values.
-// Valid modes are: min, max, avg, and sum.
-func (fn *ExponentialDecayFunction) MultiValueMode(mode string) *ExponentialDecayFunction {
- fn.multiValueMode = mode
- return fn
-}
-
-// Source returns the serializable JSON data of this score function.
-func (fn *ExponentialDecayFunction) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source[fn.fieldName] = params
- if fn.origin != nil {
- params["origin"] = fn.origin
- }
- params["scale"] = fn.scale
- if fn.decay != nil && *fn.decay > 0 {
- params["decay"] = *fn.decay
- }
- if fn.offset != nil {
- params["offset"] = fn.offset
- }
- if fn.multiValueMode != "" {
- source["multi_value_mode"] = fn.multiValueMode
- }
- return source, nil
-}
-
-// -- Gauss Decay --
-
-// GaussDecayFunction builds a gauss decay score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html
-// for details.
-type GaussDecayFunction struct {
- fieldName string
- origin interface{}
- scale interface{}
- decay *float64
- offset interface{}
- multiValueMode string
- weight *float64
-}
-
-// NewGaussDecayFunction returns a new GaussDecayFunction.
-func NewGaussDecayFunction() *GaussDecayFunction {
- return &GaussDecayFunction{}
-}
-
-// Name represents the JSON field name under which the output of Source
-// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
-func (fn *GaussDecayFunction) Name() string {
- return "gauss"
-}
-
-// FieldName specifies the name of the field to which this decay function is applied.
-func (fn *GaussDecayFunction) FieldName(fieldName string) *GaussDecayFunction {
- fn.fieldName = fieldName
- return fn
-}
-
-// Origin defines the "central point" by which the decay function calculates
-// "distance".
-func (fn *GaussDecayFunction) Origin(origin interface{}) *GaussDecayFunction {
- fn.origin = origin
- return fn
-}
-
-// Scale defines the scale to be used with Decay.
-func (fn *GaussDecayFunction) Scale(scale interface{}) *GaussDecayFunction {
- fn.scale = scale
- return fn
-}
-
-// Decay defines how documents are scored at the distance given a Scale.
-// If no decay is defined, documents at the distance Scale will be scored 0.5.
-func (fn *GaussDecayFunction) Decay(decay float64) *GaussDecayFunction {
- fn.decay = &decay
- return fn
-}
-
-// Offset, if defined, computes the decay function only for a distance
-// greater than the defined offset.
-func (fn *GaussDecayFunction) Offset(offset interface{}) *GaussDecayFunction {
- fn.offset = offset
- return fn
-}
-
-// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
-// for details.
-func (fn *GaussDecayFunction) Weight(weight float64) *GaussDecayFunction {
- fn.weight = &weight
- return fn
-}
-
-// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
-// Returns nil if weight is not specified.
-func (fn *GaussDecayFunction) GetWeight() *float64 {
- return fn.weight
-}
-
-// MultiValueMode specifies how the decay function should be calculated
-// on a field that has multiple values.
-// Valid modes are: min, max, avg, and sum.
-func (fn *GaussDecayFunction) MultiValueMode(mode string) *GaussDecayFunction {
- fn.multiValueMode = mode
- return fn
-}
-
-// Source returns the serializable JSON data of this score function.
-func (fn *GaussDecayFunction) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source[fn.fieldName] = params
- if fn.origin != nil {
- params["origin"] = fn.origin
- }
- params["scale"] = fn.scale
- if fn.decay != nil && *fn.decay > 0 {
- params["decay"] = *fn.decay
- }
- if fn.offset != nil {
- params["offset"] = fn.offset
- }
- if fn.multiValueMode != "" {
- source["multi_value_mode"] = fn.multiValueMode
- }
- // Notice that the weight has to be serialized in FunctionScoreQuery.
- return source, nil
-}
-
-// -- Linear Decay --
-
-// LinearDecayFunction builds a linear decay score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html
-// for details.
-type LinearDecayFunction struct {
- fieldName string
- origin interface{}
- scale interface{}
- decay *float64
- offset interface{}
- multiValueMode string
- weight *float64
-}
-
-// NewLinearDecayFunction initializes and returns a new LinearDecayFunction.
-func NewLinearDecayFunction() *LinearDecayFunction {
- return &LinearDecayFunction{}
-}
-
-// Name represents the JSON field name under which the output of Source
-// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
-func (fn *LinearDecayFunction) Name() string {
- return "linear"
-}
-
-// FieldName specifies the name of the field to which this decay function is applied.
-func (fn *LinearDecayFunction) FieldName(fieldName string) *LinearDecayFunction {
- fn.fieldName = fieldName
- return fn
-}
-
-// Origin defines the "central point" by which the decay function calculates
-// "distance".
-func (fn *LinearDecayFunction) Origin(origin interface{}) *LinearDecayFunction {
- fn.origin = origin
- return fn
-}
-
-// Scale defines the scale to be used with Decay.
-func (fn *LinearDecayFunction) Scale(scale interface{}) *LinearDecayFunction {
- fn.scale = scale
- return fn
-}
-
-// Decay defines how documents are scored at the distance given a Scale.
-// If no decay is defined, documents at the distance Scale will be scored 0.5.
-func (fn *LinearDecayFunction) Decay(decay float64) *LinearDecayFunction {
- fn.decay = &decay
- return fn
-}
-
-// Offset, if defined, computes the decay function only for a distance
-// greater than the defined offset.
-func (fn *LinearDecayFunction) Offset(offset interface{}) *LinearDecayFunction {
- fn.offset = offset
- return fn
-}
-
-// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
-// for details.
-func (fn *LinearDecayFunction) Weight(weight float64) *LinearDecayFunction {
- fn.weight = &weight
- return fn
-}
-
-// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
-// Returns nil if weight is not specified.
-func (fn *LinearDecayFunction) GetWeight() *float64 {
- return fn.weight
-}
-
-// MultiValueMode specifies how the decay function should be calculated
-// on a field that has multiple values.
-// Valid modes are: min, max, avg, and sum.
-func (fn *LinearDecayFunction) MultiValueMode(mode string) *LinearDecayFunction {
- fn.multiValueMode = mode
- return fn
-}
-
-// GetMultiValueMode returns how the decay function should be calculated
-// on a field that has multiple values.
-// Valid modes are: min, max, avg, and sum.
-func (fn *LinearDecayFunction) GetMultiValueMode() string {
- return fn.multiValueMode
-}
-
-// Source returns the serializable JSON data of this score function.
-func (fn *LinearDecayFunction) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source[fn.fieldName] = params
- if fn.origin != nil {
- params["origin"] = fn.origin
- }
- params["scale"] = fn.scale
- if fn.decay != nil && *fn.decay > 0 {
- params["decay"] = *fn.decay
- }
- if fn.offset != nil {
- params["offset"] = fn.offset
- }
- if fn.multiValueMode != "" {
- source["multi_value_mode"] = fn.multiValueMode
- }
- // Notice that the weight has to be serialized in FunctionScoreQuery.
- return source, nil
-}
-
-// -- Script --
-
-// ScriptFunction builds a script score function. It uses a script to
-// compute or influence the score of documents that match the inner
-// query or filter.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_script_score
-// for details.
-type ScriptFunction struct {
- script *Script
- weight *float64
-}
-
-// NewScriptFunction initializes and returns a new ScriptFunction.
-func NewScriptFunction(script *Script) *ScriptFunction {
- return &ScriptFunction{
- script: script,
- }
-}
-
-// Name represents the JSON field name under which the output of Source
-// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
-func (fn *ScriptFunction) Name() string {
- return "script_score"
-}
-
-// Script specifies the script to be executed.
-func (fn *ScriptFunction) Script(script *Script) *ScriptFunction {
- fn.script = script
- return fn
-}
-
-// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
-// for details.
-func (fn *ScriptFunction) Weight(weight float64) *ScriptFunction {
- fn.weight = &weight
- return fn
-}
-
-// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
-// Returns nil if weight is not specified.
-func (fn *ScriptFunction) GetWeight() *float64 {
- return fn.weight
-}
-
-// Source returns the serializable JSON data of this score function.
-func (fn *ScriptFunction) Source() (interface{}, error) {
- source := make(map[string]interface{})
- if fn.script != nil {
- src, err := fn.script.Source()
- if err != nil {
- return nil, err
- }
- source["script"] = src
- }
- // Notice that the weight has to be serialized in FunctionScoreQuery.
- return source, nil
-}
-
-// -- Field value factor --
-
-// FieldValueFactorFunction is a function score function that allows you
-// to use a field from a document to influence the score.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_field_value_factor.
-type FieldValueFactorFunction struct {
- field string
- factor *float64
- missing *float64
- weight *float64
- modifier string
-}
-
-// NewFieldValueFactorFunction initializes and returns a new FieldValueFactorFunction.
-func NewFieldValueFactorFunction() *FieldValueFactorFunction {
- return &FieldValueFactorFunction{}
-}
-
-// Name represents the JSON field name under which the output of Source
-// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
-func (fn *FieldValueFactorFunction) Name() string {
- return "field_value_factor"
-}
-
-// Field is the field to be extracted from the document.
-func (fn *FieldValueFactorFunction) Field(field string) *FieldValueFactorFunction {
- fn.field = field
- return fn
-}
-
-// Factor is the (optional) factor to multiply the field with. If you do not
-// specify a factor, the default is 1.
-func (fn *FieldValueFactorFunction) Factor(factor float64) *FieldValueFactorFunction {
- fn.factor = &factor
- return fn
-}
-
-// Modifier to apply to the field value. It can be one of: none, log, log1p,
-// log2p, ln, ln1p, ln2p, square, sqrt, or reciprocal. Defaults to: none.
-func (fn *FieldValueFactorFunction) Modifier(modifier string) *FieldValueFactorFunction {
- fn.modifier = modifier
- return fn
-}
-
-// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
-// for details.
-func (fn *FieldValueFactorFunction) Weight(weight float64) *FieldValueFactorFunction {
- fn.weight = &weight
- return fn
-}
-
-// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
-// Returns nil if weight is not specified.
-func (fn *FieldValueFactorFunction) GetWeight() *float64 {
- return fn.weight
-}
-
-// Missing is the value to use if a document does not have that field.
-func (fn *FieldValueFactorFunction) Missing(missing float64) *FieldValueFactorFunction {
- fn.missing = &missing
- return fn
-}
-
-// Source returns the serializable JSON data of this score function.
-func (fn *FieldValueFactorFunction) Source() (interface{}, error) {
- source := make(map[string]interface{})
- if fn.field != "" {
- source["field"] = fn.field
- }
- if fn.factor != nil {
- source["factor"] = *fn.factor
- }
- if fn.missing != nil {
- source["missing"] = *fn.missing
- }
- if fn.modifier != "" {
- source["modifier"] = strings.ToLower(fn.modifier)
- }
- // Notice that the weight has to be serialized in FunctionScoreQuery.
- return source, nil
-}
-
-// -- Weight Factor --
-
-// WeightFactorFunction builds a weight factor function that multiplies
-// the score by the given weight.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_weight
-// for details.
-type WeightFactorFunction struct {
- weight float64
-}
-
-// NewWeightFactorFunction initializes and returns a new WeightFactorFunction.
-func NewWeightFactorFunction(weight float64) *WeightFactorFunction {
- return &WeightFactorFunction{weight: weight}
-}
-
-// Name represents the JSON field name under which the output of Source
-// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
-func (fn *WeightFactorFunction) Name() string {
- return "weight"
-}
-
-// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
-// for details.
-func (fn *WeightFactorFunction) Weight(weight float64) *WeightFactorFunction {
- fn.weight = weight
- return fn
-}
-
-// GetWeight returns the weight. It is part of the ScoreFunction interface.
-// For WeightFactorFunction, the returned value is never nil.
-func (fn *WeightFactorFunction) GetWeight() *float64 {
- return &fn.weight
-}
-
-// Source returns the serializable JSON data of this score function.
-func (fn *WeightFactorFunction) Source() (interface{}, error) {
- // Notice that the weight has to be serialized in FunctionScoreQuery.
- return fn.weight, nil
-}
-
-// -- Random --
-
-// RandomFunction builds a random score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_random
-// for details.
-type RandomFunction struct {
- seed interface{}
- weight *float64
-}
-
-// NewRandomFunction initializes and returns a new RandomFunction.
-func NewRandomFunction() *RandomFunction {
- return &RandomFunction{}
-}
-
-// Name represents the JSON field name under which the output of Source
-// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
-func (fn *RandomFunction) Name() string {
- return "random_score"
-}
-
-// Seed is documented in 1.6 as a numeric value. However, in the source code
-// of the Java client, it also accepts strings. So we accept both here, too.
-func (fn *RandomFunction) Seed(seed interface{}) *RandomFunction {
- fn.seed = seed
- return fn
-}
-
-// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
-// for details.
-func (fn *RandomFunction) Weight(weight float64) *RandomFunction {
- fn.weight = &weight
- return fn
-}
-
-// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
-// Returns nil if weight is not specified.
-func (fn *RandomFunction) GetWeight() *float64 {
- return fn.weight
-}
-
-// Source returns the serializable JSON data of this score function.
-func (fn *RandomFunction) Source() (interface{}, error) {
- source := make(map[string]interface{})
- if fn.seed != nil {
- source["seed"] = fn.seed
- }
- // Notice that the weight has to be serialized in FunctionScoreQuery.
- return source, nil
-}
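A sketch of how the decay score functions deleted above attach to a function score query; it only builds the query and prints the request body, without touching a cluster. The field names "pin.location" and "date", the origin, and the scales are assumptions for illustration.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// Decay the score with distance from an origin and additionally
	// favor recent documents via a weighted linear decay on "date".
	q := elastic.NewFunctionScoreQuery().
		Query(elastic.NewMatchAllQuery()).
		AddScoreFunc(elastic.NewGaussDecayFunction().
			FieldName("pin.location").Origin("40.73, -74.1").Scale("2km").Decay(0.5)).
		AddScoreFunc(elastic.NewLinearDecayFunction().
			FieldName("date").Origin("now").Scale("10d").Weight(2)).
		ScoreMode("sum")

	src, err := q.Source()
	if err != nil {
		log.Fatal(err)
	}
	data, err := json.MarshalIndent(src, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	// The JSON body that would be sent to Elasticsearch.
	fmt.Println(string(data))
}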
diff --git a/vendor/github.com/olivere/elastic/search_queries_fsq_test.go b/vendor/github.com/olivere/elastic/search_queries_fsq_test.go
deleted file mode 100644
index 256752d18..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_fsq_test.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestFunctionScoreQuery(t *testing.T) {
- q := NewFunctionScoreQuery().
- Query(NewTermQuery("name.last", "banon")).
- Add(NewTermQuery("name.last", "banon"), NewWeightFactorFunction(1.5)).
- AddScoreFunc(NewWeightFactorFunction(3)).
- AddScoreFunc(NewRandomFunction()).
- Boost(3).
- MaxBoost(10).
- ScoreMode("avg")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"function_score":{"boost":3,"functions":[{"filter":{"term":{"name.last":"banon"}},"weight":1.5},{"weight":3},{"random_score":{}}],"max_boost":10,"query":{"term":{"name.last":"banon"}},"score_mode":"avg"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFunctionScoreQueryWithNilFilter(t *testing.T) {
- q := NewFunctionScoreQuery().
- Query(NewTermQuery("tag", "wow")).
- AddScoreFunc(NewRandomFunction()).
- Boost(2.0).
- MaxBoost(12.0).
- BoostMode("multiply").
- ScoreMode("max")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"function_score":{"boost":2,"boost_mode":"multiply","max_boost":12,"query":{"term":{"tag":"wow"}},"random_score":{},"score_mode":"max"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFieldValueFactor(t *testing.T) {
- q := NewFunctionScoreQuery().
- Query(NewTermQuery("name.last", "banon")).
- AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income")).
- Boost(2.0).
- MaxBoost(12.0).
- BoostMode("multiply").
- ScoreMode("max")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFieldValueFactorWithWeight(t *testing.T) {
- q := NewFunctionScoreQuery().
- Query(NewTermQuery("name.last", "banon")).
- AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)).
- Boost(2.0).
- MaxBoost(12.0).
- BoostMode("multiply").
- ScoreMode("max")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max","weight":2.5}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFieldValueFactorWithMultipleScoreFuncsAndWeights(t *testing.T) {
- q := NewFunctionScoreQuery().
- Query(NewTermQuery("name.last", "banon")).
- AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)).
- AddScoreFunc(NewScriptFunction(NewScript("_score * doc['my_numeric_field'].value")).Weight(1.25)).
- AddScoreFunc(NewWeightFactorFunction(0.5)).
- Boost(2.0).
- MaxBoost(12.0).
- BoostMode("multiply").
- ScoreMode("max")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"function_score":{"boost":2,"boost_mode":"multiply","functions":[{"field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"weight":2.5},{"script_score":{"script":{"source":"_score * doc['my_numeric_field'].value"}},"weight":1.25},{"weight":0.5}],"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFunctionScoreQueryWithGaussScoreFunc(t *testing.T) {
- q := NewFunctionScoreQuery().
- Query(NewTermQuery("name.last", "banon")).
- AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33))
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"function_score":{"gauss":{"pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFunctionScoreQueryWithGaussScoreFuncAndMultiValueMode(t *testing.T) {
- q := NewFunctionScoreQuery().
- Query(NewTermQuery("name.last", "banon")).
- AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33).MultiValueMode("avg"))
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"function_score":{"gauss":{"multi_value_mode":"avg","pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_fuzzy.go b/vendor/github.com/olivere/elastic/search_queries_fuzzy.go
deleted file mode 100644
index 02b6c52c2..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_fuzzy.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// FuzzyQuery uses similarity based on Levenshtein edit distance for
-// string fields, and a +/- margin on numeric and date fields.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-fuzzy-query.html
-type FuzzyQuery struct {
- name string
- value interface{}
- boost *float64
- fuzziness interface{}
- prefixLength *int
- maxExpansions *int
- transpositions *bool
- rewrite string
- queryName string
-}
-
-// NewFuzzyQuery creates a new fuzzy query.
-func NewFuzzyQuery(name string, value interface{}) *FuzzyQuery {
- q := &FuzzyQuery{
- name: name,
- value: value,
- }
- return q
-}
-
-// Boost sets the boost for this query. Documents matching this query will
-// (in addition to the normal weightings) have their score multiplied by
-// the boost provided.
-func (q *FuzzyQuery) Boost(boost float64) *FuzzyQuery {
- q.boost = &boost
- return q
-}
-
-// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings
-// like "auto", "0..1", "1..4" or "0.0..1.0".
-func (q *FuzzyQuery) Fuzziness(fuzziness interface{}) *FuzzyQuery {
- q.fuzziness = fuzziness
- return q
-}
-
-func (q *FuzzyQuery) PrefixLength(prefixLength int) *FuzzyQuery {
- q.prefixLength = &prefixLength
- return q
-}
-
-func (q *FuzzyQuery) MaxExpansions(maxExpansions int) *FuzzyQuery {
- q.maxExpansions = &maxExpansions
- return q
-}
-
-func (q *FuzzyQuery) Transpositions(transpositions bool) *FuzzyQuery {
- q.transpositions = &transpositions
- return q
-}
-
-func (q *FuzzyQuery) Rewrite(rewrite string) *FuzzyQuery {
- q.rewrite = rewrite
- return q
-}
-
-// QueryName sets the query name for the filter that can be used when
-// searching for matched filters per hit.
-func (q *FuzzyQuery) QueryName(queryName string) *FuzzyQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the fuzzy query.
-func (q *FuzzyQuery) Source() (interface{}, error) {
- // {
- // "fuzzy" : {
- // "user" : {
- // "value" : "ki",
- // "boost" : 1.0,
- // "fuzziness" : 2,
- // "prefix_length" : 0,
- // "max_expansions" : 100
- // }
- // }
-
- source := make(map[string]interface{})
- query := make(map[string]interface{})
- source["fuzzy"] = query
-
- fq := make(map[string]interface{})
- query[q.name] = fq
-
- fq["value"] = q.value
-
- if q.boost != nil {
- fq["boost"] = *q.boost
- }
- if q.transpositions != nil {
- fq["transpositions"] = *q.transpositions
- }
- if q.fuzziness != nil {
- fq["fuzziness"] = q.fuzziness
- }
- if q.prefixLength != nil {
- fq["prefix_length"] = *q.prefixLength
- }
- if q.maxExpansions != nil {
- fq["max_expansions"] = *q.maxExpansions
- }
- if q.rewrite != "" {
- fq["rewrite"] = q.rewrite
- }
- if q.queryName != "" {
- fq["_name"] = q.queryName
- }
-
- return source, nil
-}
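A brief sketch of how the fuzzy query deleted above might be used to tolerate typos; the index, field, and misspelled term are assumptions for the example.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// Assumption: a local node and a "twitter" index with a "user" field.
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Tolerate up to two character edits, so "kimzhy" still matches "kimchy".
	// PrefixLength(1) requires the first character to match exactly.
	q := elastic.NewFuzzyQuery("user", "kimzhy").Fuzziness(2).PrefixLength(1)

	res, err := client.Search().Index("twitter").Query(q).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fuzzy query matched %d documents\n", res.Hits.TotalHits)
}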
diff --git a/vendor/github.com/olivere/elastic/search_queries_fuzzy_test.go b/vendor/github.com/olivere/elastic/search_queries_fuzzy_test.go
deleted file mode 100644
index 89140ca23..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_fuzzy_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestFuzzyQuery(t *testing.T) {
- q := NewFuzzyQuery("user", "ki").Boost(1.5).Fuzziness(2).PrefixLength(0).MaxExpansions(100)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"fuzzy":{"user":{"boost":1.5,"fuzziness":2,"max_expansions":100,"prefix_length":0,"value":"ki"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_geo_bounding_box.go b/vendor/github.com/olivere/elastic/search_queries_geo_bounding_box.go
deleted file mode 100644
index 0418620d8..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_geo_bounding_box.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "errors"
-
-// GeoBoundingBoxQuery allows filtering hits based on a point location,
-// using a bounding box.
-//
-// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-geo-bounding-box-query.html
-type GeoBoundingBoxQuery struct {
- name string
- top *float64
- left *float64
- bottom *float64
- right *float64
- typ string
- queryName string
-}
-
-// NewGeoBoundingBoxQuery creates and initializes a new GeoBoundingBoxQuery.
-func NewGeoBoundingBoxQuery(name string) *GeoBoundingBoxQuery {
- return &GeoBoundingBoxQuery{
- name: name,
- }
-}
-
-func (q *GeoBoundingBoxQuery) TopLeft(top, left float64) *GeoBoundingBoxQuery {
- q.top = &top
- q.left = &left
- return q
-}
-
-func (q *GeoBoundingBoxQuery) TopLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery {
- return q.TopLeft(point.Lat, point.Lon)
-}
-
-func (q *GeoBoundingBoxQuery) BottomRight(bottom, right float64) *GeoBoundingBoxQuery {
- q.bottom = &bottom
- q.right = &right
- return q
-}
-
-func (q *GeoBoundingBoxQuery) BottomRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery {
- return q.BottomRight(point.Lat, point.Lon)
-}
-
-func (q *GeoBoundingBoxQuery) BottomLeft(bottom, left float64) *GeoBoundingBoxQuery {
- q.bottom = &bottom
- q.left = &left
- return q
-}
-
-func (q *GeoBoundingBoxQuery) BottomLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery {
- return q.BottomLeft(point.Lat, point.Lon)
-}
-
-func (q *GeoBoundingBoxQuery) TopRight(top, right float64) *GeoBoundingBoxQuery {
- q.top = &top
- q.right = &right
- return q
-}
-
-func (q *GeoBoundingBoxQuery) TopRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery {
- return q.TopRight(point.Lat, point.Lon)
-}
-
-// Type sets how the geo bounding box is executed. It can be either
-// memory or indexed. It defaults to memory.
-func (q *GeoBoundingBoxQuery) Type(typ string) *GeoBoundingBoxQuery {
- q.typ = typ
- return q
-}
-
-func (q *GeoBoundingBoxQuery) QueryName(queryName string) *GeoBoundingBoxQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the geo bounding box query.
-func (q *GeoBoundingBoxQuery) Source() (interface{}, error) {
- // {
- // "geo_bounding_box" : {
- // ...
- // }
- // }
-
- if q.top == nil {
- return nil, errors.New("geo_bounding_box requires top latitude to be set")
- }
- if q.bottom == nil {
- return nil, errors.New("geo_bounding_box requires bottom latitude to be set")
- }
- if q.right == nil {
- return nil, errors.New("geo_bounding_box requires right longitude to be set")
- }
- if q.left == nil {
- return nil, errors.New("geo_bounding_box requires left longitude to be set")
- }
-
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["geo_bounding_box"] = params
-
- box := make(map[string]interface{})
- box["top_left"] = []float64{*q.left, *q.top}
- box["bottom_right"] = []float64{*q.right, *q.bottom}
- params[q.name] = box
-
- if q.typ != "" {
- params["type"] = q.typ
- }
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
-
- return source, nil
-}
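A minimal sketch of how the geo bounding box query deleted above might be used; the "shops" index and the "pin.location" geo_point field are assumptions, and the corner coordinates are only placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// Assumption: a local node and a "shops" index with a geo_point
	// field named "pin.location".
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Keep only documents whose location falls inside the box.
	q := elastic.NewGeoBoundingBoxQuery("pin.location").
		TopLeft(40.73, -74.1).
		BottomRight(40.01, -71.12)

	res, err := client.Search().Index("shops").Query(q).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d documents inside the bounding box\n", res.Hits.TotalHits)
}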
diff --git a/vendor/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go b/vendor/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go
deleted file mode 100644
index f44a2364f..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestGeoBoundingBoxQueryIncomplete(t *testing.T) {
- q := NewGeoBoundingBoxQuery("pin.location")
- q = q.TopLeft(40.73, -74.1)
- // no bottom and no right here
- q = q.Type("memory")
- src, err := q.Source()
- if err == nil {
- t.Fatal("expected error")
- }
- if src != nil {
- t.Fatal("expected empty source")
- }
-}
-
-func TestGeoBoundingBoxQuery(t *testing.T) {
- q := NewGeoBoundingBoxQuery("pin.location")
- q = q.TopLeft(40.73, -74.1)
- q = q.BottomRight(40.01, -71.12)
- q = q.Type("memory")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_bounding_box":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]},"type":"memory"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoBoundingBoxQueryWithGeoPoint(t *testing.T) {
- q := NewGeoBoundingBoxQuery("pin.location")
- q = q.TopLeftFromGeoPoint(GeoPointFromLatLon(40.73, -74.1))
- q = q.BottomRightFromGeoPoint(GeoPointFromLatLon(40.01, -71.12))
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_bounding_box":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_geo_distance.go b/vendor/github.com/olivere/elastic/search_queries_geo_distance.go
deleted file mode 100644
index 00e62725f..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_geo_distance.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// GeoDistanceQuery filters documents to include only hits that exist
-// within a specific distance from a geo point.
-//
-// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-geo-distance-query.html
-type GeoDistanceQuery struct {
- name string
- distance string
- lat float64
- lon float64
- geohash string
- distanceType string
- queryName string
-}
-
-// NewGeoDistanceQuery creates and initializes a new GeoDistanceQuery.
-func NewGeoDistanceQuery(name string) *GeoDistanceQuery {
- return &GeoDistanceQuery{name: name}
-}
-
-func (q *GeoDistanceQuery) GeoPoint(point *GeoPoint) *GeoDistanceQuery {
- q.lat = point.Lat
- q.lon = point.Lon
- return q
-}
-
-func (q *GeoDistanceQuery) Point(lat, lon float64) *GeoDistanceQuery {
- q.lat = lat
- q.lon = lon
- return q
-}
-
-func (q *GeoDistanceQuery) Lat(lat float64) *GeoDistanceQuery {
- q.lat = lat
- return q
-}
-
-func (q *GeoDistanceQuery) Lon(lon float64) *GeoDistanceQuery {
- q.lon = lon
- return q
-}
-
-func (q *GeoDistanceQuery) GeoHash(geohash string) *GeoDistanceQuery {
- q.geohash = geohash
- return q
-}
-
-func (q *GeoDistanceQuery) Distance(distance string) *GeoDistanceQuery {
- q.distance = distance
- return q
-}
-
-func (q *GeoDistanceQuery) DistanceType(distanceType string) *GeoDistanceQuery {
- q.distanceType = distanceType
- return q
-}
-
-func (q *GeoDistanceQuery) QueryName(queryName string) *GeoDistanceQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the geo distance query.
-func (q *GeoDistanceQuery) Source() (interface{}, error) {
- // {
- // "geo_distance" : {
- // "distance" : "200km",
- // "pin.location" : {
- // "lat" : 40,
- // "lon" : -70
- // }
- // }
- // }
-
- source := make(map[string]interface{})
-
- params := make(map[string]interface{})
-
- if q.geohash != "" {
- params[q.name] = q.geohash
- } else {
- location := make(map[string]interface{})
- location["lat"] = q.lat
- location["lon"] = q.lon
- params[q.name] = location
- }
-
- if q.distance != "" {
- params["distance"] = q.distance
- }
- if q.distanceType != "" {
- params["distance_type"] = q.distanceType
- }
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
-
- source["geo_distance"] = params
-
- return source, nil
-}
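A sketch of the geo distance query deleted above, wrapped in a bool filter so the distance check does not affect scoring. The index, field name, center point, and radius are assumptions for the example.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// Assumption: a local node and a "shops" index with a geo_point "pin.location".
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Geo queries are typically used in filter context; here the distance
	// check filters hits without contributing to the score.
	geo := elastic.NewGeoDistanceQuery("pin.location").
		Point(40.73, -74.1).
		Distance("5km")
	q := elastic.NewBoolQuery().Filter(geo)

	res, err := client.Search().Index("shops").Query(q).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d documents within 5km\n", res.Hits.TotalHits)
}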
diff --git a/vendor/github.com/olivere/elastic/search_queries_geo_distance_test.go b/vendor/github.com/olivere/elastic/search_queries_geo_distance_test.go
deleted file mode 100644
index dd169575a..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_geo_distance_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestGeoDistanceQuery(t *testing.T) {
- q := NewGeoDistanceQuery("pin.location")
- q = q.Lat(40)
- q = q.Lon(-70)
- q = q.Distance("200km")
- q = q.DistanceType("plane")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_distance":{"distance":"200km","distance_type":"plane","pin.location":{"lat":40,"lon":-70}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoDistanceQueryWithGeoPoint(t *testing.T) {
- q := NewGeoDistanceQuery("pin.location")
- q = q.GeoPoint(GeoPointFromLatLon(40, -70))
- q = q.Distance("200km")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_distance":{"distance":"200km","pin.location":{"lat":40,"lon":-70}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoDistanceQueryWithGeoHash(t *testing.T) {
- q := NewGeoDistanceQuery("pin.location")
- q = q.GeoHash("drm3btev3e86")
- q = q.Distance("12km")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_distance":{"distance":"12km","pin.location":"drm3btev3e86"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_geo_polygon.go b/vendor/github.com/olivere/elastic/search_queries_geo_polygon.go
deleted file mode 100644
index 7678c3f3b..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_geo_polygon.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// GeoPolygonQuery allows including only those hits that fall within a polygon of points.
-//
-// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-geo-polygon-query.html
-type GeoPolygonQuery struct {
- name string
- points []*GeoPoint
- queryName string
-}
-
-// NewGeoPolygonQuery creates and initializes a new GeoPolygonQuery.
-func NewGeoPolygonQuery(name string) *GeoPolygonQuery {
- return &GeoPolygonQuery{
- name: name,
- points: make([]*GeoPoint, 0),
- }
-}
-
-// AddPoint adds a point from latitude and longitude.
-func (q *GeoPolygonQuery) AddPoint(lat, lon float64) *GeoPolygonQuery {
- q.points = append(q.points, GeoPointFromLatLon(lat, lon))
- return q
-}
-
-// AddGeoPoint adds a GeoPoint.
-func (q *GeoPolygonQuery) AddGeoPoint(point *GeoPoint) *GeoPolygonQuery {
- q.points = append(q.points, point)
- return q
-}
-
-func (q *GeoPolygonQuery) QueryName(queryName string) *GeoPolygonQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the geo polygon query.
-func (q *GeoPolygonQuery) Source() (interface{}, error) {
- // "geo_polygon" : {
- // "person.location" : {
- // "points" : [
- // {"lat" : 40, "lon" : -70},
- // {"lat" : 30, "lon" : -80},
- // {"lat" : 20, "lon" : -90}
- // ]
- // }
- // }
- source := make(map[string]interface{})
-
- params := make(map[string]interface{})
- source["geo_polygon"] = params
-
- polygon := make(map[string]interface{})
- params[q.name] = polygon
-
- var points []interface{}
- for _, point := range q.points {
- points = append(points, point.Source())
- }
- polygon["points"] = points
-
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
-
- return source, nil
-}
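A minimal sketch of the geo polygon query deleted above; the "people" index, the "person.location" geo_point field, and the triangle vertices are assumptions chosen for illustration.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// Assumption: a local node and a "people" index with a geo_point
	// field "person.location".
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Keep only documents whose location lies inside the triangle.
	q := elastic.NewGeoPolygonQuery("person.location").
		AddPoint(40, -70).
		AddPoint(30, -80).
		AddGeoPoint(elastic.GeoPointFromLatLon(20, -90))

	res, err := client.Search().Index("people").Query(q).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d documents inside the polygon\n", res.Hits.TotalHits)
}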
diff --git a/vendor/github.com/olivere/elastic/search_queries_geo_polygon_test.go b/vendor/github.com/olivere/elastic/search_queries_geo_polygon_test.go
deleted file mode 100644
index 932c57d7b..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_geo_polygon_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestGeoPolygonQuery(t *testing.T) {
- q := NewGeoPolygonQuery("person.location")
- q = q.AddPoint(40, -70)
- q = q.AddPoint(30, -80)
- point, err := GeoPointFromString("20,-90")
- if err != nil {
- t.Fatalf("GeoPointFromString failed: %v", err)
- }
- q = q.AddGeoPoint(point)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoPolygonQueryFromGeoPoints(t *testing.T) {
- q := NewGeoPolygonQuery("person.location")
- q = q.AddGeoPoint(&GeoPoint{Lat: 40, Lon: -70})
- q = q.AddGeoPoint(GeoPointFromLatLon(30, -80))
- point, err := GeoPointFromString("20,-90")
- if err != nil {
- t.Fatalf("GeoPointFromString failed: %v", err)
- }
- q = q.AddGeoPoint(point)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_has_child.go b/vendor/github.com/olivere/elastic/search_queries_has_child.go
deleted file mode 100644
index 41e7429c4..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_has_child.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// HasChildQuery accepts a query and the child type to run against, and results
-// in parent documents that have child docs matching the query.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-has-child-query.html
-type HasChildQuery struct {
- query Query
- childType string
- boost *float64
- scoreMode string
- minChildren *int
- maxChildren *int
- shortCircuitCutoff *int
- queryName string
- innerHit *InnerHit
-}
-
-// NewHasChildQuery creates and initializes a new has_child query.
-func NewHasChildQuery(childType string, query Query) *HasChildQuery {
- return &HasChildQuery{
- query: query,
- childType: childType,
- }
-}
-
-// Boost sets the boost for this query.
-func (q *HasChildQuery) Boost(boost float64) *HasChildQuery {
- q.boost = &boost
- return q
-}
-
-// ScoreMode defines how the scores from the matching child documents
-// are mapped into the parent document. Allowed values are: min, max,
-// avg, or none.
-func (q *HasChildQuery) ScoreMode(scoreMode string) *HasChildQuery {
- q.scoreMode = scoreMode
- return q
-}
-
-// MinChildren defines the minimum number of children that are required
-// to match for the parent to be considered a match.
-func (q *HasChildQuery) MinChildren(minChildren int) *HasChildQuery {
- q.minChildren = &minChildren
- return q
-}
-
-// MaxChildren defines the maximum number of children that are allowed
-// to match for the parent to be considered a match.
-func (q *HasChildQuery) MaxChildren(maxChildren int) *HasChildQuery {
- q.maxChildren = &maxChildren
- return q
-}
-
-// ShortCircuitCutoff configures the cut off point at which only parent
-// documents that contain the matching parent id terms are evaluated,
-// instead of evaluating all parent docs.
-func (q *HasChildQuery) ShortCircuitCutoff(shortCircuitCutoff int) *HasChildQuery {
- q.shortCircuitCutoff = &shortCircuitCutoff
- return q
-}
-
-// QueryName specifies the query name for the filter that can be used when
-// searching for matched filters per hit.
-func (q *HasChildQuery) QueryName(queryName string) *HasChildQuery {
- q.queryName = queryName
- return q
-}
-
-// InnerHit sets the inner hit definition in the scope of this query,
-// reusing the defined type and query.
-func (q *HasChildQuery) InnerHit(innerHit *InnerHit) *HasChildQuery {
- q.innerHit = innerHit
- return q
-}
-
-// Source returns JSON for the has_child query.
-func (q *HasChildQuery) Source() (interface{}, error) {
- // {
- // "has_child" : {
- // "type" : "blog_tag",
- // "score_mode" : "min",
- // "query" : {
- // "term" : {
- // "tag" : "something"
- // }
- // }
- // }
- // }
- source := make(map[string]interface{})
- query := make(map[string]interface{})
- source["has_child"] = query
-
- src, err := q.query.Source()
- if err != nil {
- return nil, err
- }
- query["query"] = src
- query["type"] = q.childType
- if q.boost != nil {
- query["boost"] = *q.boost
- }
- if q.scoreMode != "" {
- query["score_mode"] = q.scoreMode
- }
- if q.minChildren != nil {
- query["min_children"] = *q.minChildren
- }
- if q.maxChildren != nil {
- query["max_children"] = *q.maxChildren
- }
- if q.shortCircuitCutoff != nil {
- query["short_circuit_cutoff"] = *q.shortCircuitCutoff
- }
- if q.queryName != "" {
- query["_name"] = q.queryName
- }
- if q.innerHit != nil {
- src, err := q.innerHit.Source()
- if err != nil {
- return nil, err
- }
- query["inner_hits"] = src
- }
- return source, nil
-}
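
A short usage sketch for the has_child query above, assuming a hypothetical, already-configured *elastic.Client, a "blogs" index, and the usual context and elastic imports; parents match when at least one "blog_tag" child matches, and the matching children are returned via the inner hit named "tags":

    // findTaggedBlogs returns parent documents that have at least one child of
    // type "blog_tag" matching the term query.
    func findTaggedBlogs(ctx context.Context, client *elastic.Client) (*elastic.SearchResult, error) {
        q := elastic.NewHasChildQuery("blog_tag", elastic.NewTermQuery("tag", "something")).
            ScoreMode("min").
            MinChildren(1).
            InnerHit(elastic.NewInnerHit().Name("tags"))
        return client.Search().Index("blogs").Query(q).Do(ctx)
    }
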
diff --git a/vendor/github.com/olivere/elastic/search_queries_has_child_test.go b/vendor/github.com/olivere/elastic/search_queries_has_child_test.go
deleted file mode 100644
index 745c263f9..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_has_child_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestHasChildQuery(t *testing.T) {
- q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")).ScoreMode("min")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"has_child":{"query":{"term":{"tag":"something"}},"score_mode":"min","type":"blog_tag"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestHasChildQueryWithInnerHit(t *testing.T) {
- q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something"))
- q = q.InnerHit(NewInnerHit().Name("comments"))
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"has_child":{"inner_hits":{"name":"comments"},"query":{"term":{"tag":"something"}},"type":"blog_tag"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_has_parent.go b/vendor/github.com/olivere/elastic/search_queries_has_parent.go
deleted file mode 100644
index 5e1b650af..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_has_parent.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// HasParentQuery accepts a query and a parent type. The query is executed
-// in the parent document space, which is specified by the parent type.
-// This query returns child documents whose associated parents have matched.
-// Otherwise, the has_parent query has the same options and works in the
-// same manner as the has_child query.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-has-parent-query.html
-type HasParentQuery struct {
- query Query
- parentType string
- boost *float64
- score *bool
- queryName string
- innerHit *InnerHit
-}
-
-// NewHasParentQuery creates and initializes a new has_parent query.
-func NewHasParentQuery(parentType string, query Query) *HasParentQuery {
- return &HasParentQuery{
- query: query,
- parentType: parentType,
- }
-}
-
-// Boost sets the boost for this query.
-func (q *HasParentQuery) Boost(boost float64) *HasParentQuery {
- q.boost = &boost
- return q
-}
-
-// Score defines if the parent score is mapped into the child documents.
-func (q *HasParentQuery) Score(score bool) *HasParentQuery {
- q.score = &score
- return q
-}
-
-// QueryName specifies the query name for the filter that can be used when
-// searching for matched filters per hit.
-func (q *HasParentQuery) QueryName(queryName string) *HasParentQuery {
- q.queryName = queryName
- return q
-}
-
-// InnerHit sets the inner hit definition in the scope of this query,
-// reusing the defined type and query.
-func (q *HasParentQuery) InnerHit(innerHit *InnerHit) *HasParentQuery {
- q.innerHit = innerHit
- return q
-}
-
-// Source returns JSON for the has_parent query.
-func (q *HasParentQuery) Source() (interface{}, error) {
- // {
- // "has_parent" : {
- // "parent_type" : "blog",
- // "query" : {
- // "term" : {
- // "tag" : "something"
- // }
- // }
- // }
- // }
- source := make(map[string]interface{})
- query := make(map[string]interface{})
- source["has_parent"] = query
-
- src, err := q.query.Source()
- if err != nil {
- return nil, err
- }
- query["query"] = src
- query["parent_type"] = q.parentType
- if q.boost != nil {
- query["boost"] = *q.boost
- }
- if q.score != nil {
- query["score"] = *q.score
- }
- if q.queryName != "" {
- query["_name"] = q.queryName
- }
- if q.innerHit != nil {
- src, err := q.innerHit.Source()
- if err != nil {
- return nil, err
- }
- query["inner_hits"] = src
- }
- return source, nil
-}
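
A brief sketch of the has_parent query above (same imports as the geo_polygon sketch earlier); with Score(true) the parent's score is mapped into the matching child documents, and the serialized form matches the test below:

    q := elastic.NewHasParentQuery("blog", elastic.NewTermQuery("tag", "something")).Score(true)
    src, _ := q.Source()
    out, _ := json.Marshal(src)
    // string(out) == `{"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}},"score":true}}`
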
diff --git a/vendor/github.com/olivere/elastic/search_queries_has_parent_test.go b/vendor/github.com/olivere/elastic/search_queries_has_parent_test.go
deleted file mode 100644
index 0fec395e3..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_has_parent_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestHasParentQueryTest(t *testing.T) {
- q := NewHasParentQuery("blog", NewTermQuery("tag", "something")).Score(true)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}},"score":true}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_ids.go b/vendor/github.com/olivere/elastic/search_queries_ids.go
deleted file mode 100644
index e067aebbe..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_ids.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// IdsQuery filters documents that only have the provided ids.
-// Note, this query uses the _uid field.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-ids-query.html
-type IdsQuery struct {
- types []string
- values []string
- boost *float64
- queryName string
-}
-
-// NewIdsQuery creates and initializes a new ids query.
-func NewIdsQuery(types ...string) *IdsQuery {
- return &IdsQuery{
- types: types,
- values: make([]string, 0),
- }
-}
-
-// Ids adds ids to the filter.
-func (q *IdsQuery) Ids(ids ...string) *IdsQuery {
- q.values = append(q.values, ids...)
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *IdsQuery) Boost(boost float64) *IdsQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name for the filter.
-func (q *IdsQuery) QueryName(queryName string) *IdsQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the ids query.
-func (q *IdsQuery) Source() (interface{}, error) {
- // {
- // "ids" : {
- // "type" : "my_type",
- // "values" : ["1", "4", "100"]
- // }
- // }
-
- source := make(map[string]interface{})
- query := make(map[string]interface{})
- source["ids"] = query
-
- // type(s)
- if len(q.types) == 1 {
- query["type"] = q.types[0]
- } else if len(q.types) > 1 {
- query["types"] = q.types
- }
-
- // values
- query["values"] = q.values
-
- if q.boost != nil {
- query["boost"] = *q.boost
- }
- if q.queryName != "" {
- query["_name"] = q.queryName
- }
-
- return source, nil
-}
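
A quick sketch of the ids query above (imports as in the geo_polygon sketch); the construction and resulting JSON mirror the test below:

    q := elastic.NewIdsQuery("my_type").Ids("1", "4", "100").Boost(10.5).QueryName("my_query")
    src, _ := q.Source()
    out, _ := json.Marshal(src)
    // string(out) == `{"ids":{"_name":"my_query","boost":10.5,"type":"my_type","values":["1","4","100"]}}`
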
diff --git a/vendor/github.com/olivere/elastic/search_queries_ids_test.go b/vendor/github.com/olivere/elastic/search_queries_ids_test.go
deleted file mode 100644
index b36605b4d..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_ids_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestIdsQuery(t *testing.T) {
- q := NewIdsQuery("my_type").Ids("1", "4", "100").Boost(10.5).QueryName("my_query")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"ids":{"_name":"my_query","boost":10.5,"type":"my_type","values":["1","4","100"]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_match.go b/vendor/github.com/olivere/elastic/search_queries_match.go
deleted file mode 100644
index b38b12452..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_match.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// MatchQuery is a family of queries that accepts text/numerics/dates,
-// analyzes them, and constructs a query.
-//
-// To create a new MatchQuery, use NewMatchQuery. To create specific types
-// of queries, e.g. a match_phrase query, use one of the dedicated
-// constructors such as NewMatchPhraseQuery(...).
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-query.html
-type MatchQuery struct {
- name string
- text interface{}
- operator string // or / and
- analyzer string
- boost *float64
- fuzziness string
- prefixLength *int
- maxExpansions *int
- minimumShouldMatch string
- fuzzyRewrite string
- lenient *bool
- fuzzyTranspositions *bool
- zeroTermsQuery string
- cutoffFrequency *float64
- queryName string
-}
-
-// NewMatchQuery creates and initializes a new MatchQuery.
-func NewMatchQuery(name string, text interface{}) *MatchQuery {
- return &MatchQuery{name: name, text: text}
-}
-
-// Operator sets the operator to use when using a boolean query.
-// Can be "AND" or "OR" (default).
-func (q *MatchQuery) Operator(operator string) *MatchQuery {
- q.operator = operator
- return q
-}
-
-// Analyzer explicitly sets the analyzer to use. It defaults to use explicit
-// mapping config for the field, or, if not set, the default search analyzer.
-func (q *MatchQuery) Analyzer(analyzer string) *MatchQuery {
- q.analyzer = analyzer
- return q
-}
-
-// Fuzziness sets the fuzziness when evaluated to a fuzzy query type.
-// Defaults to "AUTO".
-func (q *MatchQuery) Fuzziness(fuzziness string) *MatchQuery {
- q.fuzziness = fuzziness
- return q
-}
-
-// PrefixLength sets the length of the common (non-fuzzy)
-// prefix for fuzzy match queries. It must be non-negative.
-func (q *MatchQuery) PrefixLength(prefixLength int) *MatchQuery {
- q.prefixLength = &prefixLength
- return q
-}
-
-// MaxExpansions is used with fuzzy or prefix type queries. It specifies
-// the number of term expansions to use. It defaults to unbounded, so it's
-// recommended to set it to a reasonable value for faster execution.
-func (q *MatchQuery) MaxExpansions(maxExpansions int) *MatchQuery {
- q.maxExpansions = &maxExpansions
- return q
-}
-
-// CutoffFrequency can be a value in [0..1] (or an absolute number >=1).
-// It represents the maximum threshold of a term's document frequency to be
-// considered a low frequency term.
-func (q *MatchQuery) CutoffFrequency(cutoff float64) *MatchQuery {
- q.cutoffFrequency = &cutoff
- return q
-}
-
-// MinimumShouldMatch sets the optional minimumShouldMatch value to
-// apply to the query.
-func (q *MatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MatchQuery {
- q.minimumShouldMatch = minimumShouldMatch
- return q
-}
-
-// FuzzyRewrite sets the fuzzy_rewrite parameter controlling how the
-// fuzzy query will get rewritten.
-func (q *MatchQuery) FuzzyRewrite(fuzzyRewrite string) *MatchQuery {
- q.fuzzyRewrite = fuzzyRewrite
- return q
-}
-
-// FuzzyTranspositions sets whether transpositions are supported in
-// fuzzy queries.
-//
-// The default metric used by fuzzy queries to determine a match is the
-// Damerau-Levenshtein distance formula, which supports transpositions.
-// Setting FuzzyTranspositions to false switches to the classic Levenshtein
-// distance; if it is not set, the Damerau-Levenshtein metric is used.
-func (q *MatchQuery) FuzzyTranspositions(fuzzyTranspositions bool) *MatchQuery {
- q.fuzzyTranspositions = &fuzzyTranspositions
- return q
-}
-
-// Lenient specifies whether format based failures will be ignored.
-func (q *MatchQuery) Lenient(lenient bool) *MatchQuery {
- q.lenient = &lenient
- return q
-}
-
-// ZeroTermsQuery can be "all" or "none".
-func (q *MatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MatchQuery {
- q.zeroTermsQuery = zeroTermsQuery
- return q
-}
-
-// Boost sets the boost to apply to this query.
-func (q *MatchQuery) Boost(boost float64) *MatchQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name for the filter that can be used when
-// searching for matched filters per hit.
-func (q *MatchQuery) QueryName(queryName string) *MatchQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the match query.
-func (q *MatchQuery) Source() (interface{}, error) {
- // {"match":{"name":{"query":"value","type":"boolean/phrase"}}}
- source := make(map[string]interface{})
-
- match := make(map[string]interface{})
- source["match"] = match
-
- query := make(map[string]interface{})
- match[q.name] = query
-
- query["query"] = q.text
-
- if q.operator != "" {
- query["operator"] = q.operator
- }
- if q.analyzer != "" {
- query["analyzer"] = q.analyzer
- }
- if q.fuzziness != "" {
- query["fuzziness"] = q.fuzziness
- }
- if q.prefixLength != nil {
- query["prefix_length"] = *q.prefixLength
- }
- if q.maxExpansions != nil {
- query["max_expansions"] = *q.maxExpansions
- }
- if q.minimumShouldMatch != "" {
- query["minimum_should_match"] = q.minimumShouldMatch
- }
- if q.fuzzyRewrite != "" {
- query["fuzzy_rewrite"] = q.fuzzyRewrite
- }
- if q.lenient != nil {
- query["lenient"] = *q.lenient
- }
- if q.fuzzyTranspositions != nil {
- query["fuzzy_transpositions"] = *q.fuzzyTranspositions
- }
- if q.zeroTermsQuery != "" {
- query["zero_terms_query"] = q.zeroTermsQuery
- }
- if q.cutoffFrequency != nil {
- query["cutoff_frequency"] = q.cutoffFrequency
- }
- if q.boost != nil {
- query["boost"] = *q.boost
- }
- if q.queryName != "" {
- query["_name"] = q.queryName
- }
-
- return source, nil
-}
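
A usage sketch for the match query above, assuming a hypothetical *elastic.Client and a "tweets" index; it combines a few of the options documented above:

    // searchMessages runs an analyzed full-text match over the "message" field.
    func searchMessages(ctx context.Context, client *elastic.Client) (*elastic.SearchResult, error) {
        q := elastic.NewMatchQuery("message", "quick brown fox").
            Operator("and").      // require all terms to match
            Fuzziness("AUTO").    // tolerate small typos
            ZeroTermsQuery("all") // match everything if the analyzer strips all terms
        return client.Search().Index("tweets").Query(q).Do(ctx)
    }
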
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_all.go b/vendor/github.com/olivere/elastic/search_queries_match_all.go
deleted file mode 100644
index 3829c8af0..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_match_all.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// MatchAllQuery is the most simple query, which matches all documents,
-// giving them all a _score of 1.0.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-all-query.html
-type MatchAllQuery struct {
- boost *float64
- queryName string
-}
-
-// NewMatchAllQuery creates and initializes a new match all query.
-func NewMatchAllQuery() *MatchAllQuery {
- return &MatchAllQuery{}
-}
-
-// Boost sets the boost for this query. Documents matching this query will
-// (in addition to the normal weightings) have their score multiplied by the
-// boost provided.
-func (q *MatchAllQuery) Boost(boost float64) *MatchAllQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name.
-func (q *MatchAllQuery) QueryName(name string) *MatchAllQuery {
- q.queryName = name
- return q
-}
-
-// Source returns JSON for the match all query.
-func (q MatchAllQuery) Source() (interface{}, error) {
- // {
- // "match_all" : { ... }
- // }
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["match_all"] = params
- if q.boost != nil {
- params["boost"] = *q.boost
- }
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
- return source, nil
-}
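
A tiny sketch for the match_all query above (imports as in the geo_polygon sketch); the boost only becomes relevant once the clause is combined with other queries:

    q := elastic.NewMatchAllQuery().Boost(1.2)
    src, _ := q.Source()
    out, _ := json.Marshal(src)
    // string(out) == `{"match_all":{"boost":1.2}}`
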
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_all_test.go b/vendor/github.com/olivere/elastic/search_queries_match_all_test.go
deleted file mode 100644
index 5d8671025..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_match_all_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMatchAllQuery(t *testing.T) {
- q := NewMatchAllQuery()
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"match_all":{}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMatchAllQueryWithBoost(t *testing.T) {
- q := NewMatchAllQuery().Boost(3.14)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"match_all":{"boost":3.14}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMatchAllQueryWithQueryName(t *testing.T) {
- q := NewMatchAllQuery().QueryName("qname")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"match_all":{"_name":"qname"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_none.go b/vendor/github.com/olivere/elastic/search_queries_match_none.go
deleted file mode 100644
index 9afe16716..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_match_none.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// MatchNoneQuery returns no documents. It is the inverse of
-// MatchAllQuery.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-all-query.html
-type MatchNoneQuery struct {
- queryName string
-}
-
-// NewMatchNoneQuery creates and initializes a new match none query.
-func NewMatchNoneQuery() *MatchNoneQuery {
- return &MatchNoneQuery{}
-}
-
-// QueryName sets the query name.
-func (q *MatchNoneQuery) QueryName(name string) *MatchNoneQuery {
- q.queryName = name
- return q
-}
-
-// Source returns JSON for the match none query.
-func (q MatchNoneQuery) Source() (interface{}, error) {
- // {
- // "match_none" : { ... }
- // }
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["match_none"] = params
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
- return source, nil
-}
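
A small sketch of the match_none query above (imports as in the geo_polygon sketch); it is handy as a guard or placeholder query that deliberately matches nothing:

    q := elastic.NewMatchNoneQuery().QueryName("no_filters")
    src, _ := q.Source()
    out, _ := json.Marshal(src)
    // string(out) == `{"match_none":{"_name":"no_filters"}}`
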
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_none_test.go b/vendor/github.com/olivere/elastic/search_queries_match_none_test.go
deleted file mode 100644
index 6463452da..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_match_none_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMatchNoneQuery(t *testing.T) {
- q := NewMatchNoneQuery()
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"match_none":{}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMatchNoneQueryWithQueryName(t *testing.T) {
- q := NewMatchNoneQuery().QueryName("qname")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"match_none":{"_name":"qname"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_phrase.go b/vendor/github.com/olivere/elastic/search_queries_match_phrase.go
deleted file mode 100644
index 0e4c6327e..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_match_phrase.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// MatchPhraseQuery analyzes the text and creates a phrase query out of
-// the analyzed text.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-query-phrase.html
-type MatchPhraseQuery struct {
- name string
- value interface{}
- analyzer string
- slop *int
- boost *float64
- queryName string
-}
-
-// NewMatchPhraseQuery creates and initializes a new MatchPhraseQuery.
-func NewMatchPhraseQuery(name string, value interface{}) *MatchPhraseQuery {
- return &MatchPhraseQuery{name: name, value: value}
-}
-
-// Analyzer explicitly sets the analyzer to use. It defaults to use explicit
-// mapping config for the field, or, if not set, the default search analyzer.
-func (q *MatchPhraseQuery) Analyzer(analyzer string) *MatchPhraseQuery {
- q.analyzer = analyzer
- return q
-}
-
-// Slop sets the phrase slop if evaluated to a phrase query type.
-func (q *MatchPhraseQuery) Slop(slop int) *MatchPhraseQuery {
- q.slop = &slop
- return q
-}
-
-// Boost sets the boost to apply to this query.
-func (q *MatchPhraseQuery) Boost(boost float64) *MatchPhraseQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name for the filter that can be used when
-// searching for matched filters per hit.
-func (q *MatchPhraseQuery) QueryName(queryName string) *MatchPhraseQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the match_phrase query.
-func (q *MatchPhraseQuery) Source() (interface{}, error) {
- // {"match_phrase":{"name":{"query":"value","analyzer":"my_analyzer"}}}
- source := make(map[string]interface{})
-
- match := make(map[string]interface{})
- source["match_phrase"] = match
-
- query := make(map[string]interface{})
- match[q.name] = query
-
- query["query"] = q.value
-
- if q.analyzer != "" {
- query["analyzer"] = q.analyzer
- }
- if q.slop != nil {
- query["slop"] = *q.slop
- }
- if q.boost != nil {
- query["boost"] = *q.boost
- }
- if q.queryName != "" {
- query["_name"] = q.queryName
- }
-
- return source, nil
-}
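
A sketch of the match_phrase query above (imports as in the geo_polygon sketch); Slop(1) tolerates one position of movement between the phrase terms:

    q := elastic.NewMatchPhraseQuery("message", "this is a test").
        Analyzer("my_analyzer").
        Slop(1)
    src, _ := q.Source()
    out, _ := json.Marshal(src)
    // string(out) == `{"match_phrase":{"message":{"analyzer":"my_analyzer","query":"this is a test","slop":1}}}`
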
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix.go b/vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix.go
deleted file mode 100644
index 10a88668d..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// MatchPhrasePrefixQuery is the same as match_phrase, except that it allows for
-// prefix matches on the last term in the text.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-query-phrase-prefix.html
-type MatchPhrasePrefixQuery struct {
- name string
- value interface{}
- analyzer string
- slop *int
- maxExpansions *int
- boost *float64
- queryName string
-}
-
-// NewMatchPhrasePrefixQuery creates and initializes a new MatchPhrasePrefixQuery.
-func NewMatchPhrasePrefixQuery(name string, value interface{}) *MatchPhrasePrefixQuery {
- return &MatchPhrasePrefixQuery{name: name, value: value}
-}
-
-// Analyzer explicitly sets the analyzer to use. It defaults to use explicit
-// mapping config for the field, or, if not set, the default search analyzer.
-func (q *MatchPhrasePrefixQuery) Analyzer(analyzer string) *MatchPhrasePrefixQuery {
- q.analyzer = analyzer
- return q
-}
-
-// Slop sets the phrase slop if evaluated to a phrase query type.
-func (q *MatchPhrasePrefixQuery) Slop(slop int) *MatchPhrasePrefixQuery {
- q.slop = &slop
- return q
-}
-
-// MaxExpansions sets the number of term expansions to use.
-func (q *MatchPhrasePrefixQuery) MaxExpansions(n int) *MatchPhrasePrefixQuery {
- q.maxExpansions = &n
- return q
-}
-
-// Boost sets the boost to apply to this query.
-func (q *MatchPhrasePrefixQuery) Boost(boost float64) *MatchPhrasePrefixQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name for the filter that can be used when
-// searching for matched filters per hit.
-func (q *MatchPhrasePrefixQuery) QueryName(queryName string) *MatchPhrasePrefixQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the match_phrase_prefix query.
-func (q *MatchPhrasePrefixQuery) Source() (interface{}, error) {
- // {"match_phrase_prefix":{"name":{"query":"value","max_expansions":10}}}
- source := make(map[string]interface{})
-
- match := make(map[string]interface{})
- source["match_phrase_prefix"] = match
-
- query := make(map[string]interface{})
- match[q.name] = query
-
- query["query"] = q.value
-
- if q.analyzer != "" {
- query["analyzer"] = q.analyzer
- }
- if q.slop != nil {
- query["slop"] = *q.slop
- }
- if q.maxExpansions != nil {
- query["max_expansions"] = *q.maxExpansions
- }
- if q.boost != nil {
- query["boost"] = *q.boost
- }
- if q.queryName != "" {
- query["_name"] = q.queryName
- }
-
- return source, nil
-}
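
A sketch of the match_phrase_prefix query above (hypothetical client and "tweets" index), which is a common building block for simple autocomplete:

    // suggestMessages treats the last term of the input as a prefix.
    func suggestMessages(ctx context.Context, client *elastic.Client, input string) (*elastic.SearchResult, error) {
        q := elastic.NewMatchPhrasePrefixQuery("message", input).
            MaxExpansions(10) // cap prefix expansion for faster execution
        return client.Search().Index("tweets").Query(q).Do(ctx)
    }
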
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix_test.go b/vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix_test.go
deleted file mode 100644
index 82a02f17d..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMatchPhrasePrefixQuery(t *testing.T) {
- q := NewMatchPhrasePrefixQuery("message", "this is a test").Boost(0.3).MaxExpansions(5)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"match_phrase_prefix":{"message":{"boost":0.3,"max_expansions":5,"query":"this is a test"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_phrase_test.go b/vendor/github.com/olivere/elastic/search_queries_match_phrase_test.go
deleted file mode 100644
index 85e60d8b5..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_match_phrase_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMatchPhraseQuery(t *testing.T) {
- q := NewMatchPhraseQuery("message", "this is a test").
- Analyzer("my_analyzer").
- Boost(0.7)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"match_phrase":{"message":{"analyzer":"my_analyzer","boost":0.7,"query":"this is a test"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_test.go b/vendor/github.com/olivere/elastic/search_queries_match_test.go
deleted file mode 100644
index dd750cf93..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_match_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMatchQuery(t *testing.T) {
- q := NewMatchQuery("message", "this is a test")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"match":{"message":{"query":"this is a test"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMatchQueryWithOptions(t *testing.T) {
- q := NewMatchQuery("message", "this is a test").Analyzer("whitespace").Operator("or").Boost(2.5)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"match":{"message":{"analyzer":"whitespace","boost":2.5,"operator":"or","query":"this is a test"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_more_like_this.go b/vendor/github.com/olivere/elastic/search_queries_more_like_this.go
deleted file mode 100644
index 5c71e291f..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_more_like_this.go
+++ /dev/null
@@ -1,412 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "errors"
-
-// MoreLikeThis query (MLT Query) finds documents that are "like" a given
-// set of documents. In order to do so, MLT selects a set of representative
-// terms of these input documents, forms a query using these terms, executes
-// the query and returns the results. The user controls the input documents,
-// how the terms should be selected and how the query is formed.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-mlt-query.html
-type MoreLikeThisQuery struct {
- fields []string
- docs []*MoreLikeThisQueryItem
- unlikeDocs []*MoreLikeThisQueryItem
- include *bool
- minimumShouldMatch string
- minTermFreq *int
- maxQueryTerms *int
- stopWords []string
- minDocFreq *int
- maxDocFreq *int
- minWordLength *int
- maxWordLength *int
- boostTerms *float64
- boost *float64
- analyzer string
- failOnUnsupportedField *bool
- queryName string
-}
-
-// NewMoreLikeThisQuery creates and initializes a new MoreLikeThisQuery.
-func NewMoreLikeThisQuery() *MoreLikeThisQuery {
- return &MoreLikeThisQuery{
- fields: make([]string, 0),
- stopWords: make([]string, 0),
- docs: make([]*MoreLikeThisQueryItem, 0),
- unlikeDocs: make([]*MoreLikeThisQueryItem, 0),
- }
-}
-
-// Field adds one or more field names to the query.
-func (q *MoreLikeThisQuery) Field(fields ...string) *MoreLikeThisQuery {
- q.fields = append(q.fields, fields...)
- return q
-}
-
-// StopWord sets the stopwords. Any word in this set is considered
-// "uninteresting" and ignored. Even if your Analyzer allows stopwords,
-// you might want to tell the MoreLikeThis code to ignore them, as for
-// the purposes of document similarity it seems reasonable to assume that
-// "a stop word is never interesting".
-func (q *MoreLikeThisQuery) StopWord(stopWords ...string) *MoreLikeThisQuery {
- q.stopWords = append(q.stopWords, stopWords...)
- return q
-}
-
-// LikeText sets the text to use in order to find documents that are "like" this.
-func (q *MoreLikeThisQuery) LikeText(likeTexts ...string) *MoreLikeThisQuery {
- for _, s := range likeTexts {
- item := NewMoreLikeThisQueryItem().LikeText(s)
- q.docs = append(q.docs, item)
- }
- return q
-}
-
-// LikeItems sets the documents to use in order to find documents that are "like" this.
-func (q *MoreLikeThisQuery) LikeItems(docs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery {
- q.docs = append(q.docs, docs...)
- return q
-}
-
-// IgnoreLikeText sets the text from which the terms should not be selected.
-func (q *MoreLikeThisQuery) IgnoreLikeText(ignoreLikeText ...string) *MoreLikeThisQuery {
- for _, s := range ignoreLikeText {
- item := NewMoreLikeThisQueryItem().LikeText(s)
- q.unlikeDocs = append(q.unlikeDocs, item)
- }
- return q
-}
-
-// IgnoreLikeItems sets the documents from which the terms should not be selected.
-func (q *MoreLikeThisQuery) IgnoreLikeItems(ignoreDocs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery {
- q.unlikeDocs = append(q.unlikeDocs, ignoreDocs...)
- return q
-}
-
-// Ids sets the document ids to use in order to find documents that are "like" this.
-func (q *MoreLikeThisQuery) Ids(ids ...string) *MoreLikeThisQuery {
- for _, id := range ids {
- item := NewMoreLikeThisQueryItem().Id(id)
- q.docs = append(q.docs, item)
- }
- return q
-}
-
-// Include specifies whether the input documents should also be included
-// in the results returned. Defaults to false.
-func (q *MoreLikeThisQuery) Include(include bool) *MoreLikeThisQuery {
- q.include = &include
- return q
-}
-
-// MinimumShouldMatch sets the number of terms that must match the generated
-// query expressed in the common syntax for minimum should match.
-// The default value is "30%".
-//
-// This used to be "PercentTermsToMatch" in Elasticsearch versions before 2.0.
-func (q *MoreLikeThisQuery) MinimumShouldMatch(minimumShouldMatch string) *MoreLikeThisQuery {
- q.minimumShouldMatch = minimumShouldMatch
- return q
-}
-
-// MinTermFreq is the frequency below which terms will be ignored in the
-// source doc. The default frequency is 2.
-func (q *MoreLikeThisQuery) MinTermFreq(minTermFreq int) *MoreLikeThisQuery {
- q.minTermFreq = &minTermFreq
- return q
-}
-
-// MaxQueryTerms sets the maximum number of query terms that will be included
-// in any generated query. It defaults to 25.
-func (q *MoreLikeThisQuery) MaxQueryTerms(maxQueryTerms int) *MoreLikeThisQuery {
- q.maxQueryTerms = &maxQueryTerms
- return q
-}
-
-// MinDocFreq sets the minimum document frequency: words that do not
-// occur in at least this many docs will be ignored. The default is 5.
-func (q *MoreLikeThisQuery) MinDocFreq(minDocFreq int) *MoreLikeThisQuery {
- q.minDocFreq = &minDocFreq
- return q
-}
-
-// MaxDocFreq sets the maximum document frequency: words that appear in
-// more than this many docs will be ignored.
-// It defaults to unbounded.
-func (q *MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) *MoreLikeThisQuery {
- q.maxDocFreq = &maxDocFreq
- return q
-}
-
-// MinWordLength sets the minimum word length below which words will be
-// ignored. It defaults to 0.
-func (q *MoreLikeThisQuery) MinWordLength(minWordLength int) *MoreLikeThisQuery {
- q.minWordLength = &minWordLength
- return q
-}
-
-// MaxWordLength sets the maximum word length above which words will be ignored.
-// Defaults to unbounded (0).
-func (q *MoreLikeThisQuery) MaxWordLength(maxWordLength int) *MoreLikeThisQuery {
- q.maxWordLength = &maxWordLength
- return q
-}
-
-// BoostTerms sets the boost factor to use when boosting terms.
-// It defaults to 1.
-func (q *MoreLikeThisQuery) BoostTerms(boostTerms float64) *MoreLikeThisQuery {
- q.boostTerms = &boostTerms
- return q
-}
-
-// Analyzer specifies the analyzer that will be used to analyze the text.
-// Defaults to the analyzer associated with the field.
-func (q *MoreLikeThisQuery) Analyzer(analyzer string) *MoreLikeThisQuery {
- q.analyzer = analyzer
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *MoreLikeThisQuery) Boost(boost float64) *MoreLikeThisQuery {
- q.boost = &boost
- return q
-}
-
-// FailOnUnsupportedField indicates whether to fail or return no result
-// when this query is run against a field which is not supported such as
-// a binary/numeric field.
-func (q *MoreLikeThisQuery) FailOnUnsupportedField(fail bool) *MoreLikeThisQuery {
- q.failOnUnsupportedField = &fail
- return q
-}
-
-// QueryName sets the query name for the filter that can be used when
-// searching for matched_filters per hit.
-func (q *MoreLikeThisQuery) QueryName(queryName string) *MoreLikeThisQuery {
- q.queryName = queryName
- return q
-}
-
-// Source creates the source for the MLT query.
-// It may return an error if the caller forgot to specify any documents to
-// be "liked" in the MoreLikeThisQuery.
-func (q *MoreLikeThisQuery) Source() (interface{}, error) {
- // {
-	//   "more_like_this" : { ... }
- // }
- if len(q.docs) == 0 {
- return nil, errors.New(`more_like_this requires some documents to be "liked"`)
- }
-
- source := make(map[string]interface{})
-
- params := make(map[string]interface{})
- source["more_like_this"] = params
-
- if len(q.fields) > 0 {
- params["fields"] = q.fields
- }
-
- var likes []interface{}
- for _, doc := range q.docs {
- src, err := doc.Source()
- if err != nil {
- return nil, err
- }
- likes = append(likes, src)
- }
- params["like"] = likes
-
- if len(q.unlikeDocs) > 0 {
- var dontLikes []interface{}
- for _, doc := range q.unlikeDocs {
- src, err := doc.Source()
- if err != nil {
- return nil, err
- }
- dontLikes = append(dontLikes, src)
- }
- params["unlike"] = dontLikes
- }
-
- if q.minimumShouldMatch != "" {
- params["minimum_should_match"] = q.minimumShouldMatch
- }
- if q.minTermFreq != nil {
- params["min_term_freq"] = *q.minTermFreq
- }
- if q.maxQueryTerms != nil {
- params["max_query_terms"] = *q.maxQueryTerms
- }
- if len(q.stopWords) > 0 {
- params["stop_words"] = q.stopWords
- }
- if q.minDocFreq != nil {
- params["min_doc_freq"] = *q.minDocFreq
- }
- if q.maxDocFreq != nil {
- params["max_doc_freq"] = *q.maxDocFreq
- }
- if q.minWordLength != nil {
- params["min_word_length"] = *q.minWordLength
- }
- if q.maxWordLength != nil {
- params["max_word_length"] = *q.maxWordLength
- }
- if q.boostTerms != nil {
- params["boost_terms"] = *q.boostTerms
- }
- if q.boost != nil {
- params["boost"] = *q.boost
- }
- if q.analyzer != "" {
- params["analyzer"] = q.analyzer
- }
- if q.failOnUnsupportedField != nil {
- params["fail_on_unsupported_field"] = *q.failOnUnsupportedField
- }
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
- if q.include != nil {
- params["include"] = *q.include
- }
-
- return source, nil
-}
-
-// -- MoreLikeThisQueryItem --
-
-// MoreLikeThisQueryItem represents a single item of a MoreLikeThisQuery
-// to be "liked" or "unliked".
-type MoreLikeThisQueryItem struct {
- likeText string
-
- index string
- typ string
- id string
- doc interface{}
- fields []string
- routing string
- fsc *FetchSourceContext
- version int64
- versionType string
-}
-
-// NewMoreLikeThisQueryItem creates and initializes a MoreLikeThisQueryItem.
-func NewMoreLikeThisQueryItem() *MoreLikeThisQueryItem {
- return &MoreLikeThisQueryItem{
- version: -1,
- }
-}
-
-// LikeText represents a text to be "liked".
-func (item *MoreLikeThisQueryItem) LikeText(likeText string) *MoreLikeThisQueryItem {
- item.likeText = likeText
- return item
-}
-
-// Index represents the index of the item.
-func (item *MoreLikeThisQueryItem) Index(index string) *MoreLikeThisQueryItem {
- item.index = index
- return item
-}
-
-// Type represents the document type of the item.
-func (item *MoreLikeThisQueryItem) Type(typ string) *MoreLikeThisQueryItem {
- item.typ = typ
- return item
-}
-
-// Id represents the document id of the item.
-func (item *MoreLikeThisQueryItem) Id(id string) *MoreLikeThisQueryItem {
- item.id = id
- return item
-}
-
-// Doc represents a raw document template for the item.
-func (item *MoreLikeThisQueryItem) Doc(doc interface{}) *MoreLikeThisQueryItem {
- item.doc = doc
- return item
-}
-
-// Fields represents the list of fields of the item.
-func (item *MoreLikeThisQueryItem) Fields(fields ...string) *MoreLikeThisQueryItem {
- item.fields = append(item.fields, fields...)
- return item
-}
-
-// Routing sets the routing associated with the item.
-func (item *MoreLikeThisQueryItem) Routing(routing string) *MoreLikeThisQueryItem {
- item.routing = routing
- return item
-}
-
-// FetchSourceContext represents the fetch source of the item which controls
-// if and how _source should be returned.
-func (item *MoreLikeThisQueryItem) FetchSourceContext(fsc *FetchSourceContext) *MoreLikeThisQueryItem {
- item.fsc = fsc
- return item
-}
-
-// Version specifies the version of the item.
-func (item *MoreLikeThisQueryItem) Version(version int64) *MoreLikeThisQueryItem {
- item.version = version
- return item
-}
-
-// VersionType represents the version type of the item.
-func (item *MoreLikeThisQueryItem) VersionType(versionType string) *MoreLikeThisQueryItem {
- item.versionType = versionType
- return item
-}
-
-// Source returns the JSON-serializable fragment of the entity.
-func (item *MoreLikeThisQueryItem) Source() (interface{}, error) {
- if item.likeText != "" {
- return item.likeText, nil
- }
-
- source := make(map[string]interface{})
-
- if item.index != "" {
- source["_index"] = item.index
- }
- if item.typ != "" {
- source["_type"] = item.typ
- }
- if item.id != "" {
- source["_id"] = item.id
- }
- if item.doc != nil {
- source["doc"] = item.doc
- }
- if len(item.fields) > 0 {
- source["fields"] = item.fields
- }
- if item.routing != "" {
- source["_routing"] = item.routing
- }
- if item.fsc != nil {
- src, err := item.fsc.Source()
- if err != nil {
- return nil, err
- }
- source["_source"] = src
- }
- if item.version >= 0 {
- source["_version"] = item.version
- }
- if item.versionType != "" {
- source["_version_type"] = item.versionType
- }
-
- return source, nil
-}
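
A sketch of the more_like_this query above (hypothetical client and "tweets" index); it mixes free text and an existing document as "like" inputs, and Source() returns an error if nothing to be liked was provided:

    // similarTweets finds documents similar to both a free-text sample and an
    // already-indexed document.
    func similarTweets(ctx context.Context, client *elastic.Client) (*elastic.SearchResult, error) {
        q := elastic.NewMoreLikeThisQuery().
            Field("message").
            LikeText("Golang topic").
            LikeItems(elastic.NewMoreLikeThisQueryItem().Index("tweets").Type("doc").Id("1")).
            MinTermFreq(1).
            MinDocFreq(1)
        return client.Search().Index("tweets").Query(q).Do(ctx)
    }
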
diff --git a/vendor/github.com/olivere/elastic/search_queries_more_like_this_test.go b/vendor/github.com/olivere/elastic/search_queries_more_like_this_test.go
deleted file mode 100644
index dcbbe74d1..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_more_like_this_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "testing"
-)
-
-func TestMoreLikeThisQuerySourceWithLikeText(t *testing.T) {
- q := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatal(err)
- }
- got := string(data)
- expected := `{"more_like_this":{"fields":["message"],"like":["Golang topic"]}}`
- if got != expected {
- t.Fatalf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMoreLikeThisQuerySourceWithLikeAndUnlikeItems(t *testing.T) {
- q := NewMoreLikeThisQuery()
- q = q.LikeItems(
- NewMoreLikeThisQueryItem().Id("1"),
- NewMoreLikeThisQueryItem().Index(testIndexName2).Type("comment").Id("2").Routing("routing_id"),
- )
- q = q.IgnoreLikeItems(NewMoreLikeThisQueryItem().Id("3"))
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatal(err)
- }
- got := string(data)
- expected := `{"more_like_this":{"like":[{"_id":"1"},{"_id":"2","_index":"elastic-test2","_routing":"routing_id","_type":"comment"}],"unlike":[{"_id":"3"}]}}`
- if got != expected {
- t.Fatalf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMoreLikeThisQuery(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another Golang topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Common query
- mltq := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message")
- res, err := client.Search().
- Index(testIndexName).
- Query(mltq).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_multi_match.go b/vendor/github.com/olivere/elastic/search_queries_multi_match.go
deleted file mode 100644
index b6ff2107e..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_multi_match.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "fmt"
- "strings"
-)
-
-// MultiMatchQuery builds on the MatchQuery to allow multi-field queries.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-multi-match-query.html
-type MultiMatchQuery struct {
- text interface{}
- fields []string
- fieldBoosts map[string]*float64
- typ string // best_fields, boolean, most_fields, cross_fields, phrase, phrase_prefix
- operator string // AND or OR
- analyzer string
- boost *float64
- slop *int
- fuzziness string
- prefixLength *int
- maxExpansions *int
- minimumShouldMatch string
- rewrite string
- fuzzyRewrite string
- tieBreaker *float64
- lenient *bool
- cutoffFrequency *float64
- zeroTermsQuery string
- queryName string
-}
-
-// NewMultiMatchQuery creates and initializes a new MultiMatchQuery.
-func NewMultiMatchQuery(text interface{}, fields ...string) *MultiMatchQuery {
- q := &MultiMatchQuery{
- text: text,
- fields: make([]string, 0),
- fieldBoosts: make(map[string]*float64),
- }
- q.fields = append(q.fields, fields...)
- return q
-}
-
-// Field adds a field to run the multi match against.
-func (q *MultiMatchQuery) Field(field string) *MultiMatchQuery {
- q.fields = append(q.fields, field)
- return q
-}
-
-// FieldWithBoost adds a field to run the multi match against with a specific boost.
-func (q *MultiMatchQuery) FieldWithBoost(field string, boost float64) *MultiMatchQuery {
- q.fields = append(q.fields, field)
- q.fieldBoosts[field] = &boost
- return q
-}
-
-// Type can be "best_fields", "boolean", "most_fields", "cross_fields",
-// "phrase", or "phrase_prefix".
-func (q *MultiMatchQuery) Type(typ string) *MultiMatchQuery {
- var zero = float64(0.0)
- var one = float64(1.0)
-
- switch strings.ToLower(typ) {
- default: // best_fields / boolean
- q.typ = "best_fields"
- q.tieBreaker = &zero
- case "most_fields":
- q.typ = "most_fields"
- q.tieBreaker = &one
- case "cross_fields":
- q.typ = "cross_fields"
- q.tieBreaker = &zero
- case "phrase":
- q.typ = "phrase"
- q.tieBreaker = &zero
- case "phrase_prefix":
- q.typ = "phrase_prefix"
- q.tieBreaker = &zero
- }
- return q
-}
-
-// Operator sets the operator to use when using boolean query.
-// It can be either AND or OR (default).
-func (q *MultiMatchQuery) Operator(operator string) *MultiMatchQuery {
- q.operator = operator
- return q
-}
-
-// Analyzer sets the analyzer to use explicitly. It defaults to use explicit
-// mapping config for the field, or, if not set, the default search analyzer.
-func (q *MultiMatchQuery) Analyzer(analyzer string) *MultiMatchQuery {
- q.analyzer = analyzer
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *MultiMatchQuery) Boost(boost float64) *MultiMatchQuery {
- q.boost = &boost
- return q
-}
-
-// Slop sets the phrase slop if evaluated to a phrase query type.
-func (q *MultiMatchQuery) Slop(slop int) *MultiMatchQuery {
- q.slop = &slop
- return q
-}
-
-// Fuzziness sets the fuzziness used when evaluated to a fuzzy query type.
-// It defaults to "AUTO".
-func (q *MultiMatchQuery) Fuzziness(fuzziness string) *MultiMatchQuery {
- q.fuzziness = fuzziness
- return q
-}
-
-// PrefixLength for the fuzzy process.
-func (q *MultiMatchQuery) PrefixLength(prefixLength int) *MultiMatchQuery {
- q.prefixLength = &prefixLength
- return q
-}
-
-// MaxExpansions is the number of term expansions to use when using fuzzy
-// or prefix type query. It defaults to unbounded so it's recommended
-// to set it to a reasonable value for faster execution.
-func (q *MultiMatchQuery) MaxExpansions(maxExpansions int) *MultiMatchQuery {
- q.maxExpansions = &maxExpansions
- return q
-}
-
-// MinimumShouldMatch represents the minimum number of optional should clauses
-// to match.
-func (q *MultiMatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MultiMatchQuery {
- q.minimumShouldMatch = minimumShouldMatch
- return q
-}
-
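-// Rewrite sets the rewrite parameter controlling how the query
-// will get rewritten.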
-func (q *MultiMatchQuery) Rewrite(rewrite string) *MultiMatchQuery {
- q.rewrite = rewrite
- return q
-}
-
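-// FuzzyRewrite sets the fuzzy_rewrite parameter controlling how the
-// fuzzy query will get rewritten.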
-func (q *MultiMatchQuery) FuzzyRewrite(fuzzyRewrite string) *MultiMatchQuery {
- q.fuzzyRewrite = fuzzyRewrite
- return q
-}
-
-// TieBreaker for "best-match" disjunction queries (OR queries).
-// The tie breaker capability allows documents that match more than one
-// query clause (in this case on more than one field) to be scored better
-// than documents that match only the best of the fields, without confusing
-// this with the better case of two distinct matches in the multiple fields.
-//
-// A tie-breaker value of 1.0 is interpreted as a signal to score queries as
-// "most-match" queries where all matching query clauses are considered for scoring.
-func (q *MultiMatchQuery) TieBreaker(tieBreaker float64) *MultiMatchQuery {
- q.tieBreaker = &tieBreaker
- return q
-}
-
-// Lenient indicates whether format based failures will be ignored.
-func (q *MultiMatchQuery) Lenient(lenient bool) *MultiMatchQuery {
- q.lenient = &lenient
- return q
-}
-
-// CutoffFrequency sets a cutoff value in [0..1] (or absolute number >=1)
-// representing the maximum threshold of a terms document frequency to be
-// considered a low frequency term.
-func (q *MultiMatchQuery) CutoffFrequency(cutoff float64) *MultiMatchQuery {
- q.cutoffFrequency = &cutoff
- return q
-}
-
-// ZeroTermsQuery can be "all" or "none".
-func (q *MultiMatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MultiMatchQuery {
- q.zeroTermsQuery = zeroTermsQuery
- return q
-}
-
-// QueryName sets the query name for the filter that can be used when
-// searching for matched filters per hit.
-func (q *MultiMatchQuery) QueryName(queryName string) *MultiMatchQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the query.
-func (q *MultiMatchQuery) Source() (interface{}, error) {
- //
- // {
- // "multi_match" : {
- // "query" : "this is a test",
- // "fields" : [ "subject", "message" ]
- // }
- // }
-
- source := make(map[string]interface{})
-
- multiMatch := make(map[string]interface{})
- source["multi_match"] = multiMatch
-
- multiMatch["query"] = q.text
-
- if len(q.fields) > 0 {
- var fields []string
- for _, field := range q.fields {
- if boost, found := q.fieldBoosts[field]; found {
- if boost != nil {
- fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
- } else {
- fields = append(fields, field)
- }
- } else {
- fields = append(fields, field)
- }
- }
- multiMatch["fields"] = fields
- }
-
- if q.typ != "" {
- multiMatch["type"] = q.typ
- }
-
- if q.operator != "" {
- multiMatch["operator"] = q.operator
- }
- if q.analyzer != "" {
- multiMatch["analyzer"] = q.analyzer
- }
- if q.boost != nil {
- multiMatch["boost"] = *q.boost
- }
- if q.slop != nil {
- multiMatch["slop"] = *q.slop
- }
- if q.fuzziness != "" {
- multiMatch["fuzziness"] = q.fuzziness
- }
- if q.prefixLength != nil {
- multiMatch["prefix_length"] = *q.prefixLength
- }
- if q.maxExpansions != nil {
- multiMatch["max_expansions"] = *q.maxExpansions
- }
- if q.minimumShouldMatch != "" {
- multiMatch["minimum_should_match"] = q.minimumShouldMatch
- }
- if q.rewrite != "" {
- multiMatch["rewrite"] = q.rewrite
- }
- if q.fuzzyRewrite != "" {
- multiMatch["fuzzy_rewrite"] = q.fuzzyRewrite
- }
- if q.tieBreaker != nil {
- multiMatch["tie_breaker"] = *q.tieBreaker
- }
- if q.lenient != nil {
- multiMatch["lenient"] = *q.lenient
- }
- if q.cutoffFrequency != nil {
- multiMatch["cutoff_frequency"] = *q.cutoffFrequency
- }
- if q.zeroTermsQuery != "" {
- multiMatch["zero_terms_query"] = q.zeroTermsQuery
- }
- if q.queryName != "" {
- multiMatch["_name"] = q.queryName
- }
- return source, nil
-}
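
For reference, a minimal sketch of how MultiMatchQuery is typically composed and serialized with the setters above; the query text and field names are illustrative only, and this snippet is not part of the vendored file.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Fluent construction: every setter returns the receiver.
	q := elastic.NewMultiMatchQuery("golang elasticsearch client", "subject", "message").
		Type("best_fields").
		TieBreaker(0.3).
		MinimumShouldMatch("2").
		Lenient(true)

	// Source() builds the map that is serialized into the request body.
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}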
diff --git a/vendor/github.com/olivere/elastic/search_queries_multi_match_test.go b/vendor/github.com/olivere/elastic/search_queries_multi_match_test.go
deleted file mode 100644
index d897f7e72..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_multi_match_test.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestMultiMatchQuery(t *testing.T) {
- q := NewMultiMatchQuery("this is a test", "subject", "message")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMultiMatchQueryBestFields(t *testing.T) {
- q := NewMultiMatchQuery("this is a test", "subject", "message").Type("best_fields")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"best_fields"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMultiMatchQueryMostFields(t *testing.T) {
- q := NewMultiMatchQuery("this is a test", "subject", "message").Type("most_fields")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":1,"type":"most_fields"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMultiMatchQueryCrossFields(t *testing.T) {
- q := NewMultiMatchQuery("this is a test", "subject", "message").Type("cross_fields")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"cross_fields"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMultiMatchQueryPhrase(t *testing.T) {
- q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMultiMatchQueryPhrasePrefix(t *testing.T) {
- q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase_prefix")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase_prefix"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestMultiMatchQueryBestFieldsWithCustomTieBreaker(t *testing.T) {
- q := NewMultiMatchQuery("this is a test", "subject", "message").
- Type("best_fields").
- TieBreaker(0.3)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0.3,"type":"best_fields"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_nested.go b/vendor/github.com/olivere/elastic/search_queries_nested.go
deleted file mode 100644
index d0a342283..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_nested.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// NestedQuery allows querying nested objects / docs.
-// The query is executed against the nested objects / docs as if they were
-// indexed as separate docs (they are, internally), with the result resolving
-// to the root parent doc (or parent nested mapping).
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-nested-query.html
-type NestedQuery struct {
- query Query
- path string
- scoreMode string
- boost *float64
- queryName string
- innerHit *InnerHit
- ignoreUnmapped *bool
-}
-
-// NewNestedQuery creates and initializes a new NestedQuery.
-func NewNestedQuery(path string, query Query) *NestedQuery {
- return &NestedQuery{path: path, query: query}
-}
-
-// ScoreMode specifies the score mode.
-func (q *NestedQuery) ScoreMode(scoreMode string) *NestedQuery {
- q.scoreMode = scoreMode
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *NestedQuery) Boost(boost float64) *NestedQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name for the filter that can be used
-// when searching for matched_filters per hit
-func (q *NestedQuery) QueryName(queryName string) *NestedQuery {
- q.queryName = queryName
- return q
-}
-
-// InnerHit sets the inner hit definition in the scope of this nested query
-// and reusing the defined path and query.
-func (q *NestedQuery) InnerHit(innerHit *InnerHit) *NestedQuery {
- q.innerHit = innerHit
- return q
-}
-
-// IgnoreUnmapped sets the ignore_unmapped option for the filter that ignores
-// unmapped nested fields
-func (q *NestedQuery) IgnoreUnmapped(value bool) *NestedQuery {
- q.ignoreUnmapped = &value
- return q
-}
-
-// Source returns JSON for the query.
-func (q *NestedQuery) Source() (interface{}, error) {
- query := make(map[string]interface{})
- nq := make(map[string]interface{})
- query["nested"] = nq
-
- src, err := q.query.Source()
- if err != nil {
- return nil, err
- }
- nq["query"] = src
-
- nq["path"] = q.path
-
- if q.scoreMode != "" {
- nq["score_mode"] = q.scoreMode
- }
- if q.boost != nil {
- nq["boost"] = *q.boost
- }
- if q.queryName != "" {
- nq["_name"] = q.queryName
- }
- if q.ignoreUnmapped != nil {
- nq["ignore_unmapped"] = *q.ignoreUnmapped
- }
- if q.innerHit != nil {
- src, err := q.innerHit.Source()
- if err != nil {
- return nil, err
- }
- nq["inner_hits"] = src
- }
- return query, nil
-}
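
A short sketch of composing a nested query with inner hits via the API above; the path and field names are hypothetical, and the snippet is not part of the vendored file.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Match nested "comments" objects and surface the matching children
	// via inner_hits; field names are illustrative only.
	q := elastic.NewNestedQuery("comments", elastic.NewTermQuery("comments.user", "olivere")).
		ScoreMode("avg").
		IgnoreUnmapped(true).
		InnerHit(elastic.NewInnerHit().Name("matched_comments"))

	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}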
diff --git a/vendor/github.com/olivere/elastic/search_queries_nested_test.go b/vendor/github.com/olivere/elastic/search_queries_nested_test.go
deleted file mode 100644
index c7a5322a6..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_nested_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestNestedQuery(t *testing.T) {
- bq := NewBoolQuery()
- bq = bq.Must(NewTermQuery("obj1.name", "blue"))
- bq = bq.Must(NewRangeQuery("obj1.count").Gt(5))
- q := NewNestedQuery("obj1", bq).QueryName("qname")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"nested":{"_name":"qname","path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestNestedQueryWithInnerHit(t *testing.T) {
- bq := NewBoolQuery()
- bq = bq.Must(NewTermQuery("obj1.name", "blue"))
- bq = bq.Must(NewRangeQuery("obj1.count").Gt(5))
- q := NewNestedQuery("obj1", bq)
- q = q.QueryName("qname")
- q = q.InnerHit(NewInnerHit().Name("comments").Query(NewTermQuery("user", "olivere")))
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"nested":{"_name":"qname","inner_hits":{"name":"comments","query":{"term":{"user":"olivere"}}},"path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestNestedQueryWithIgnoreUnmapped(t *testing.T) {
- var tests = []struct {
- query *BoolQuery
- expected string
- }{
- {
- NewBoolQuery().Must(NewNestedQuery("path", NewTermQuery("test", "test"))),
- `{"bool":{"must":{"nested":{"path":"path","query":{"term":{"test":"test"}}}}}}`,
- },
- {
- NewBoolQuery().Must(NewNestedQuery("path", NewTermQuery("test", "test")).IgnoreUnmapped(true)),
- `{"bool":{"must":{"nested":{"ignore_unmapped":true,"path":"path","query":{"term":{"test":"test"}}}}}}`,
- },
- {
- NewBoolQuery().Must(NewNestedQuery("path", NewTermQuery("test", "test")).IgnoreUnmapped(false)),
- `{"bool":{"must":{"nested":{"ignore_unmapped":false,"path":"path","query":{"term":{"test":"test"}}}}}}`,
- },
- }
- for _, test := range tests {
- src, err := test.query.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- if got != test.expected {
- t.Errorf("expected\n%s\n,got:\n%s", test.expected, got)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_parent_id.go b/vendor/github.com/olivere/elastic/search_queries_parent_id.go
deleted file mode 100644
index c0b610f12..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_parent_id.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// ParentIdQuery can be used to find child documents that belong to a
-// particular parent.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-parent-id-query.html
-type ParentIdQuery struct {
- typ string
- id string
- ignoreUnmapped *bool
- boost *float64
- queryName string
- innerHit *InnerHit
-}
-
-// NewParentIdQuery creates and initializes a new parent_id query.
-func NewParentIdQuery(typ, id string) *ParentIdQuery {
- return &ParentIdQuery{
- typ: typ,
- id: id,
- }
-}
-
-// Type sets the parent type.
-func (q *ParentIdQuery) Type(typ string) *ParentIdQuery {
- q.typ = typ
- return q
-}
-
-// Id sets the id.
-func (q *ParentIdQuery) Id(id string) *ParentIdQuery {
- q.id = id
- return q
-}
-
-// IgnoreUnmapped specifies whether unmapped types should be ignored.
-// If set to false, the query fails when an unmapped type is found.
-func (q *ParentIdQuery) IgnoreUnmapped(ignore bool) *ParentIdQuery {
- q.ignoreUnmapped = &ignore
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *ParentIdQuery) Boost(boost float64) *ParentIdQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName specifies the query name for the filter that can be used when
-// searching for matched filters per hit.
-func (q *ParentIdQuery) QueryName(queryName string) *ParentIdQuery {
- q.queryName = queryName
- return q
-}
-
-// InnerHit sets the inner hit definition in the scope of this query and
-// reusing the defined type and query.
-func (q *ParentIdQuery) InnerHit(innerHit *InnerHit) *ParentIdQuery {
- q.innerHit = innerHit
- return q
-}
-
-// Source returns JSON for the parent_id query.
-func (q *ParentIdQuery) Source() (interface{}, error) {
- // {
- // "parent_id" : {
- // "type" : "blog",
- // "id" : "1"
- // }
- // }
- source := make(map[string]interface{})
- query := make(map[string]interface{})
- source["parent_id"] = query
-
- query["type"] = q.typ
- query["id"] = q.id
- if q.boost != nil {
- query["boost"] = *q.boost
- }
- if q.ignoreUnmapped != nil {
- query["ignore_unmapped"] = *q.ignoreUnmapped
- }
- if q.queryName != "" {
- query["_name"] = q.queryName
- }
- if q.innerHit != nil {
- src, err := q.innerHit.Source()
- if err != nil {
- return nil, err
- }
- query["inner_hits"] = src
- }
- return source, nil
-}
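
The parent_id query is usually sent through a search request rather than serialized by hand. A hedged sketch follows; it assumes a reachable cluster on the default http://127.0.0.1:9200 and a hypothetical "blogs" index with a "blog_tag" child relation.

package main

import (
	"context"

	"github.com/olivere/elastic"
)

func main() {
	// Connect with default options (local node).
	client, err := elastic.NewClient()
	if err != nil {
		panic(err)
	}

	// Fetch children of parent document "1"; ignore_unmapped avoids an
	// error on indices that lack the "blog_tag" relation.
	q := elastic.NewParentIdQuery("blog_tag", "1").IgnoreUnmapped(true)

	res, err := client.Search().
		Index("blogs").
		Query(q).
		Do(context.Background())
	if err != nil {
		panic(err)
	}
	_ = res
}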
diff --git a/vendor/github.com/olivere/elastic/search_queries_parent_id_test.go b/vendor/github.com/olivere/elastic/search_queries_parent_id_test.go
deleted file mode 100644
index 0d18f216a..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_parent_id_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestParentIdQueryTest(t *testing.T) {
- tests := []struct {
- Query Query
- Expected string
- }{
- // #0
- {
- Query: NewParentIdQuery("blog_tag", "1"),
- Expected: `{"parent_id":{"id":"1","type":"blog_tag"}}`,
- },
- // #1
- {
- Query: NewParentIdQuery("blog_tag", "1").IgnoreUnmapped(true),
- Expected: `{"parent_id":{"id":"1","ignore_unmapped":true,"type":"blog_tag"}}`,
- },
- // #2
- {
- Query: NewParentIdQuery("blog_tag", "1").IgnoreUnmapped(false),
- Expected: `{"parent_id":{"id":"1","ignore_unmapped":false,"type":"blog_tag"}}`,
- },
- // #3
- {
- Query: NewParentIdQuery("blog_tag", "1").IgnoreUnmapped(true).Boost(5).QueryName("my_parent_query"),
- Expected: `{"parent_id":{"_name":"my_parent_query","boost":5,"id":"1","ignore_unmapped":true,"type":"blog_tag"}}`,
- },
- }
-
- for i, tt := range tests {
- src, err := tt.Query.Source()
- if err != nil {
- t.Fatalf("#%d: encoding Source failed: %v", i, err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("#%d: marshaling to JSON failed: %v", i, err)
- }
- if want, got := tt.Expected, string(data); want != got {
- t.Fatalf("#%d: expected\n%s\ngot:\n%s", i, want, got)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_percolator.go b/vendor/github.com/olivere/elastic/search_queries_percolator.go
deleted file mode 100644
index a7605655b..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_percolator.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "errors"
-
-// PercolatorQuery can be used to match queries stored in an index.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-percolate-query.html
-type PercolatorQuery struct {
- field string
- documentType string // deprecated
- document interface{}
- indexedDocumentIndex string
- indexedDocumentType string
- indexedDocumentId string
- indexedDocumentRouting string
- indexedDocumentPreference string
- indexedDocumentVersion *int64
-}
-
-// NewPercolatorQuery creates and initializes a new Percolator query.
-func NewPercolatorQuery() *PercolatorQuery {
- return &PercolatorQuery{}
-}
-
-func (q *PercolatorQuery) Field(field string) *PercolatorQuery {
- q.field = field
- return q
-}
-
-// Deprecated: DocumentType is deprecated as of 6.0.
-func (q *PercolatorQuery) DocumentType(typ string) *PercolatorQuery {
- q.documentType = typ
- return q
-}
-
-func (q *PercolatorQuery) Document(doc interface{}) *PercolatorQuery {
- q.document = doc
- return q
-}
-
-func (q *PercolatorQuery) IndexedDocumentIndex(index string) *PercolatorQuery {
- q.indexedDocumentIndex = index
- return q
-}
-
-func (q *PercolatorQuery) IndexedDocumentType(typ string) *PercolatorQuery {
- q.indexedDocumentType = typ
- return q
-}
-
-func (q *PercolatorQuery) IndexedDocumentId(id string) *PercolatorQuery {
- q.indexedDocumentId = id
- return q
-}
-
-func (q *PercolatorQuery) IndexedDocumentRouting(routing string) *PercolatorQuery {
- q.indexedDocumentRouting = routing
- return q
-}
-
-func (q *PercolatorQuery) IndexedDocumentPreference(preference string) *PercolatorQuery {
- q.indexedDocumentPreference = preference
- return q
-}
-
-func (q *PercolatorQuery) IndexedDocumentVersion(version int64) *PercolatorQuery {
- q.indexedDocumentVersion = &version
- return q
-}
-
-// Source returns JSON for the percolate query.
-func (q *PercolatorQuery) Source() (interface{}, error) {
- if len(q.field) == 0 {
- return nil, errors.New("elastic: Field is required in PercolatorQuery")
- }
- if q.document == nil {
- return nil, errors.New("elastic: Document is required in PercolatorQuery")
- }
-
- // {
- // "percolate" : { ... }
- // }
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["percolate"] = params
- params["field"] = q.field
- if q.documentType != "" {
- params["document_type"] = q.documentType
- }
- params["document"] = q.document
- if len(q.indexedDocumentIndex) > 0 {
- params["index"] = q.indexedDocumentIndex
- }
- if len(q.indexedDocumentType) > 0 {
- params["type"] = q.indexedDocumentType
- }
- if len(q.indexedDocumentId) > 0 {
- params["id"] = q.indexedDocumentId
- }
- if len(q.indexedDocumentRouting) > 0 {
- params["routing"] = q.indexedDocumentRouting
- }
- if len(q.indexedDocumentPreference) > 0 {
- params["preference"] = q.indexedDocumentPreference
- }
- if q.indexedDocumentVersion != nil {
- params["version"] = *q.indexedDocumentVersion
- }
- return source, nil
-}
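
Since Source() rejects a percolate query without both Field and Document, here is a sketch of the minimal valid construction; the percolator field name "query" and the document body are assumptions, and the snippet is not part of the vendored file.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Both Field and Document must be set, or Source() returns an error.
	q := elastic.NewPercolatorQuery().
		Field("query").
		Document(map[string]interface{}{
			"message": "A new bonsai tree in the office",
		})

	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}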
diff --git a/vendor/github.com/olivere/elastic/search_queries_percolator_test.go b/vendor/github.com/olivere/elastic/search_queries_percolator_test.go
deleted file mode 100644
index edc7be626..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_percolator_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestPercolatorQuery(t *testing.T) {
- q := NewPercolatorQuery().
- Field("query").
- Document(map[string]interface{}{
- "message": "Some message",
- })
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"percolate":{"document":{"message":"Some message"},"field":"query"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPercolatorQueryWithDetails(t *testing.T) {
- q := NewPercolatorQuery().
- Field("query").
- Document(map[string]interface{}{
- "message": "Some message",
- }).
- IndexedDocumentIndex("index").
- IndexedDocumentId("1").
- IndexedDocumentRouting("route").
- IndexedDocumentPreference("one").
- IndexedDocumentVersion(1)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"percolate":{"document":{"message":"Some message"},"field":"query","id":"1","index":"index","preference":"one","routing":"route","version":1}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPercolatorQueryWithMissingFields(t *testing.T) {
- q := NewPercolatorQuery() // no Field, Document, or Query
- _, err := q.Source()
- if err == nil {
- t.Fatal("expected error, got nil")
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_prefix.go b/vendor/github.com/olivere/elastic/search_queries_prefix.go
deleted file mode 100644
index 075bcc7ba..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_prefix.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// PrefixQuery matches documents that have fields containing terms
-// with a specified prefix (not analyzed).
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-prefix-query.html
-type PrefixQuery struct {
- name string
- prefix string
- boost *float64
- rewrite string
- queryName string
-}
-
-// NewPrefixQuery creates and initializes a new PrefixQuery.
-func NewPrefixQuery(name string, prefix string) *PrefixQuery {
- return &PrefixQuery{name: name, prefix: prefix}
-}
-
-// Boost sets the boost for this query.
-func (q *PrefixQuery) Boost(boost float64) *PrefixQuery {
- q.boost = &boost
- return q
-}
-
-func (q *PrefixQuery) Rewrite(rewrite string) *PrefixQuery {
- q.rewrite = rewrite
- return q
-}
-
-// QueryName sets the query name for the filter that can be used when
-// searching for matched_filters per hit.
-func (q *PrefixQuery) QueryName(queryName string) *PrefixQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the query.
-func (q *PrefixQuery) Source() (interface{}, error) {
- source := make(map[string]interface{})
- query := make(map[string]interface{})
- source["prefix"] = query
-
- if q.boost == nil && q.rewrite == "" && q.queryName == "" {
- query[q.name] = q.prefix
- } else {
- subQuery := make(map[string]interface{})
- subQuery["value"] = q.prefix
- if q.boost != nil {
- subQuery["boost"] = *q.boost
- }
- if q.rewrite != "" {
- subQuery["rewrite"] = q.rewrite
- }
- if q.queryName != "" {
- subQuery["_name"] = q.queryName
- }
- query[q.name] = subQuery
- }
-
- return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_prefix_example_test.go b/vendor/github.com/olivere/elastic/search_queries_prefix_example_test.go
deleted file mode 100644
index 73950f1f3..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_prefix_example_test.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic_test
-
-import (
- "context"
-
- "github.com/olivere/elastic"
-)
-
-func ExamplePrefixQuery() {
- // Get a client to the local Elasticsearch instance.
- client, err := elastic.NewClient()
- if err != nil {
- // Handle error
- panic(err)
- }
-
- // Define prefix query
- q := elastic.NewPrefixQuery("user", "oli")
- q = q.QueryName("my_query_name")
-
- searchResult, err := client.Search().
- Index("twitter").
- Query(q).
- Pretty(true).
- Do(context.Background())
- if err != nil {
- // Handle error
- panic(err)
- }
- _ = searchResult
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_prefix_test.go b/vendor/github.com/olivere/elastic/search_queries_prefix_test.go
deleted file mode 100644
index 78d27b600..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_prefix_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestPrefixQuery(t *testing.T) {
- q := NewPrefixQuery("user", "ki")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"prefix":{"user":"ki"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPrefixQueryWithOptions(t *testing.T) {
- q := NewPrefixQuery("user", "ki")
- q = q.QueryName("my_query_name")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"prefix":{"user":{"_name":"my_query_name","value":"ki"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_query_string.go b/vendor/github.com/olivere/elastic/search_queries_query_string.go
deleted file mode 100644
index a52c8b1a5..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_query_string.go
+++ /dev/null
@@ -1,350 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "fmt"
-)
-
-// QueryStringQuery uses the query parser in order to parse its content.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-query-string-query.html
-type QueryStringQuery struct {
- queryString string
- defaultField string
- defaultOperator string
- analyzer string
- quoteAnalyzer string
- quoteFieldSuffix string
- allowLeadingWildcard *bool
- lowercaseExpandedTerms *bool // Deprecated: Decision is now made by the analyzer
- enablePositionIncrements *bool
- analyzeWildcard *bool
- locale string // Deprecated: Decision is now made by the analyzer
- boost *float64
- fuzziness string
- fuzzyPrefixLength *int
- fuzzyMaxExpansions *int
- fuzzyRewrite string
- phraseSlop *int
- fields []string
- fieldBoosts map[string]*float64
- tieBreaker *float64
- rewrite string
- minimumShouldMatch string
- lenient *bool
- queryName string
- timeZone string
- maxDeterminizedStates *int
- escape *bool
- typ string
-}
-
-// NewQueryStringQuery creates and initializes a new QueryStringQuery.
-func NewQueryStringQuery(queryString string) *QueryStringQuery {
- return &QueryStringQuery{
- queryString: queryString,
- fields: make([]string, 0),
- fieldBoosts: make(map[string]*float64),
- }
-}
-
-// DefaultField specifies the field to run against when no prefix field
-// is specified. Only relevant when no fields are explicitly added for the
-// query string to run against.
-func (q *QueryStringQuery) DefaultField(defaultField string) *QueryStringQuery {
- q.defaultField = defaultField
- return q
-}
-
-// Field adds a field to run the query string against.
-func (q *QueryStringQuery) Field(field string) *QueryStringQuery {
- q.fields = append(q.fields, field)
- return q
-}
-
-// Type sets how multiple fields should be combined to build textual part queries,
-// e.g. "best_fields".
-func (q *QueryStringQuery) Type(typ string) *QueryStringQuery {
- q.typ = typ
- return q
-}
-
-// FieldWithBoost adds a field to run the query string against with a specific boost.
-func (q *QueryStringQuery) FieldWithBoost(field string, boost float64) *QueryStringQuery {
- q.fields = append(q.fields, field)
- q.fieldBoosts[field] = &boost
- return q
-}
-
-// TieBreaker is used when more than one field is used with the query string,
-// and combined queries are using dismax.
-func (q *QueryStringQuery) TieBreaker(tieBreaker float64) *QueryStringQuery {
- q.tieBreaker = &tieBreaker
- return q
-}
-
-// DefaultOperator sets the boolean operator of the query parser used to
-// parse the query string.
-//
-// In default mode (OR) terms without any modifiers
-// are considered optional, e.g. "capital of Hungary" is equal to
-// "capital OR of OR Hungary".
-//
-// In AND mode, terms are considered to be in conjunction. The above mentioned
-// query is then parsed as "capital AND of AND Hungary".
-func (q *QueryStringQuery) DefaultOperator(operator string) *QueryStringQuery {
- q.defaultOperator = operator
- return q
-}
-
-// Analyzer is an optional analyzer used to analyze the query string.
-// Note, if a field has search analyzer defined for it, then it will be used
-// automatically. Defaults to the smart search analyzer.
-func (q *QueryStringQuery) Analyzer(analyzer string) *QueryStringQuery {
- q.analyzer = analyzer
- return q
-}
-
-// QuoteAnalyzer is an optional analyzer to be used to analyze the query string
-// for phrase searches. Note, if a field has search analyzer defined for it,
-// then it will be used automatically. Defaults to the smart search analyzer.
-func (q *QueryStringQuery) QuoteAnalyzer(quoteAnalyzer string) *QueryStringQuery {
- q.quoteAnalyzer = quoteAnalyzer
- return q
-}
-
-// MaxDeterminizedState protects against overly complex regular expression queries.
-func (q *QueryStringQuery) MaxDeterminizedState(maxDeterminizedStates int) *QueryStringQuery {
- q.maxDeterminizedStates = &maxDeterminizedStates
- return q
-}
-
-// AllowLeadingWildcard specifies whether leading wildcards should be allowed
-// or not (defaults to true).
-func (q *QueryStringQuery) AllowLeadingWildcard(allowLeadingWildcard bool) *QueryStringQuery {
- q.allowLeadingWildcard = &allowLeadingWildcard
- return q
-}
-
-// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
-// and range queries are automatically lower-cased or not. Default is true.
-//
-// Deprecated: Decision is now made by the analyzer.
-func (q *QueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *QueryStringQuery {
- q.lowercaseExpandedTerms = &lowercaseExpandedTerms
- return q
-}
-
-// EnablePositionIncrements indicates whether to enable position increments
-// in result query. Defaults to true.
-//
-// When set, result phrase and multi-phrase queries will be aware of position
-// increments. Useful when e.g. a StopFilter increases the position increment
-// of the token that follows an omitted token.
-func (q *QueryStringQuery) EnablePositionIncrements(enablePositionIncrements bool) *QueryStringQuery {
- q.enablePositionIncrements = &enablePositionIncrements
- return q
-}
-
-// Fuzziness sets the edit distance for fuzzy queries. Default is "AUTO".
-func (q *QueryStringQuery) Fuzziness(fuzziness string) *QueryStringQuery {
- q.fuzziness = fuzziness
- return q
-}
-
-// FuzzyPrefixLength sets the minimum prefix length for fuzzy queries.
-// Default is 1.
-func (q *QueryStringQuery) FuzzyPrefixLength(fuzzyPrefixLength int) *QueryStringQuery {
- q.fuzzyPrefixLength = &fuzzyPrefixLength
- return q
-}
-
-func (q *QueryStringQuery) FuzzyMaxExpansions(fuzzyMaxExpansions int) *QueryStringQuery {
- q.fuzzyMaxExpansions = &fuzzyMaxExpansions
- return q
-}
-
-func (q *QueryStringQuery) FuzzyRewrite(fuzzyRewrite string) *QueryStringQuery {
- q.fuzzyRewrite = fuzzyRewrite
- return q
-}
-
-// PhraseSlop sets the default slop for phrases. If zero, then exact matches
-// are required. Default value is zero.
-func (q *QueryStringQuery) PhraseSlop(phraseSlop int) *QueryStringQuery {
- q.phraseSlop = &phraseSlop
- return q
-}
-
-// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
-func (q *QueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *QueryStringQuery {
- q.analyzeWildcard = &analyzeWildcard
- return q
-}
-
-func (q *QueryStringQuery) Rewrite(rewrite string) *QueryStringQuery {
- q.rewrite = rewrite
- return q
-}
-
-func (q *QueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *QueryStringQuery {
- q.minimumShouldMatch = minimumShouldMatch
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *QueryStringQuery) Boost(boost float64) *QueryStringQuery {
- q.boost = &boost
- return q
-}
-
-// QuoteFieldSuffix is an optional field name suffix to automatically
-// try and add to the field searched when using quoted text.
-func (q *QueryStringQuery) QuoteFieldSuffix(quoteFieldSuffix string) *QueryStringQuery {
- q.quoteFieldSuffix = quoteFieldSuffix
- return q
-}
-
-// Lenient indicates whether the query string parser should be lenient
-// when parsing field values. It defaults to the index setting and if not
-// set, defaults to false.
-func (q *QueryStringQuery) Lenient(lenient bool) *QueryStringQuery {
- q.lenient = &lenient
- return q
-}
-
-// QueryName sets the query name for the filter that can be used when
-// searching for matched_filters per hit.
-func (q *QueryStringQuery) QueryName(queryName string) *QueryStringQuery {
- q.queryName = queryName
- return q
-}
-
-// Locale specifies the locale to be used for string conversions.
-//
-// Deprecated: Decision is now made by the analyzer.
-func (q *QueryStringQuery) Locale(locale string) *QueryStringQuery {
- q.locale = locale
- return q
-}
-
-// TimeZone can be used to automatically adjust to/from fields using a
-// timezone. Only used with date fields, of course.
-func (q *QueryStringQuery) TimeZone(timeZone string) *QueryStringQuery {
- q.timeZone = timeZone
- return q
-}
-
-// Escape performs escaping of the query string.
-func (q *QueryStringQuery) Escape(escape bool) *QueryStringQuery {
- q.escape = &escape
- return q
-}
-
-// Source returns JSON for the query.
-func (q *QueryStringQuery) Source() (interface{}, error) {
- source := make(map[string]interface{})
- query := make(map[string]interface{})
- source["query_string"] = query
-
- query["query"] = q.queryString
-
- if q.defaultField != "" {
- query["default_field"] = q.defaultField
- }
-
- if len(q.fields) > 0 {
- var fields []string
- for _, field := range q.fields {
- if boost, found := q.fieldBoosts[field]; found {
- if boost != nil {
- fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
- } else {
- fields = append(fields, field)
- }
- } else {
- fields = append(fields, field)
- }
- }
- query["fields"] = fields
- }
-
- if q.tieBreaker != nil {
- query["tie_breaker"] = *q.tieBreaker
- }
- if q.defaultOperator != "" {
- query["default_operator"] = q.defaultOperator
- }
- if q.analyzer != "" {
- query["analyzer"] = q.analyzer
- }
- if q.quoteAnalyzer != "" {
- query["quote_analyzer"] = q.quoteAnalyzer
- }
- if q.maxDeterminizedStates != nil {
- query["max_determinized_states"] = *q.maxDeterminizedStates
- }
- if q.allowLeadingWildcard != nil {
- query["allow_leading_wildcard"] = *q.allowLeadingWildcard
- }
- if q.lowercaseExpandedTerms != nil {
- query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms
- }
- if q.enablePositionIncrements != nil {
- query["enable_position_increments"] = *q.enablePositionIncrements
- }
- if q.fuzziness != "" {
- query["fuzziness"] = q.fuzziness
- }
- if q.boost != nil {
- query["boost"] = *q.boost
- }
- if q.fuzzyPrefixLength != nil {
- query["fuzzy_prefix_length"] = *q.fuzzyPrefixLength
- }
- if q.fuzzyMaxExpansions != nil {
- query["fuzzy_max_expansions"] = *q.fuzzyMaxExpansions
- }
- if q.fuzzyRewrite != "" {
- query["fuzzy_rewrite"] = q.fuzzyRewrite
- }
- if q.phraseSlop != nil {
- query["phrase_slop"] = *q.phraseSlop
- }
- if q.analyzeWildcard != nil {
- query["analyze_wildcard"] = *q.analyzeWildcard
- }
- if q.rewrite != "" {
- query["rewrite"] = q.rewrite
- }
- if q.minimumShouldMatch != "" {
- query["minimum_should_match"] = q.minimumShouldMatch
- }
- if q.quoteFieldSuffix != "" {
- query["quote_field_suffix"] = q.quoteFieldSuffix
- }
- if q.lenient != nil {
- query["lenient"] = *q.lenient
- }
- if q.queryName != "" {
- query["_name"] = q.queryName
- }
- if q.locale != "" {
- query["locale"] = q.locale
- }
- if q.timeZone != "" {
- query["time_zone"] = q.timeZone
- }
- if q.escape != nil {
- query["escape"] = *q.escape
- }
- if q.typ != "" {
- query["type"] = q.typ
- }
-
- return source, nil
-}
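
Given the number of options above, a sketch of a typical combination may help; field names, boosts and the query text are illustrative only, and the snippet is not part of the vendored file.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Lucene-syntax query across two fields, boosting "title", with AND
	// semantics and a time zone applied to date-range clauses.
	q := elastic.NewQueryStringQuery(`(quick OR brown) AND created:[2015-01-01 TO 2015-12-31]`).
		Field("content").
		FieldWithBoost("title", 3).
		DefaultOperator("AND").
		AnalyzeWildcard(true).
		TimeZone("Europe/Berlin")

	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}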
diff --git a/vendor/github.com/olivere/elastic/search_queries_query_string_test.go b/vendor/github.com/olivere/elastic/search_queries_query_string_test.go
deleted file mode 100644
index 5030c3382..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_query_string_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestQueryStringQuery(t *testing.T) {
- q := NewQueryStringQuery(`this AND that OR thus`)
- q = q.DefaultField("content")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"query_string":{"default_field":"content","query":"this AND that OR thus"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestQueryStringQueryTimeZone(t *testing.T) {
- q := NewQueryStringQuery(`tweet_date:[2015-01-01 TO 2017-12-31]`)
- q = q.TimeZone("Europe/Berlin")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"query_string":{"query":"tweet_date:[2015-01-01 TO 2017-12-31]","time_zone":"Europe/Berlin"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_range.go b/vendor/github.com/olivere/elastic/search_queries_range.go
deleted file mode 100644
index 1b92dee23..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_range.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// RangeQuery matches documents with fields that have terms within a certain range.
-//
-// For details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-range-query.html
-type RangeQuery struct {
- name string
- from interface{}
- to interface{}
- timeZone string
- includeLower bool
- includeUpper bool
- boost *float64
- queryName string
- format string
-}
-
-// NewRangeQuery creates and initializes a new RangeQuery.
-func NewRangeQuery(name string) *RangeQuery {
- return &RangeQuery{name: name, includeLower: true, includeUpper: true}
-}
-
-// From indicates the from part of the RangeQuery.
-// Use nil to indicate an unbounded from part.
-func (q *RangeQuery) From(from interface{}) *RangeQuery {
- q.from = from
- return q
-}
-
-// Gt indicates a greater-than value for the from part.
-// Use nil to indicate an unbounded from part.
-func (q *RangeQuery) Gt(from interface{}) *RangeQuery {
- q.from = from
- q.includeLower = false
- return q
-}
-
-// Gte indicates a greater-than-or-equal value for the from part.
-// Use nil to indicate an unbounded from part.
-func (q *RangeQuery) Gte(from interface{}) *RangeQuery {
- q.from = from
- q.includeLower = true
- return q
-}
-
-// To indicates the to part of the RangeQuery.
-// Use nil to indicate an unbounded to part.
-func (q *RangeQuery) To(to interface{}) *RangeQuery {
- q.to = to
- return q
-}
-
-// Lt indicates a less-than value for the to part.
-// Use nil to indicate an unbounded to part.
-func (q *RangeQuery) Lt(to interface{}) *RangeQuery {
- q.to = to
- q.includeUpper = false
- return q
-}
-
-// Lte indicates a less-than-or-equal value for the to part.
-// Use nil to indicate an unbounded to part.
-func (q *RangeQuery) Lte(to interface{}) *RangeQuery {
- q.to = to
- q.includeUpper = true
- return q
-}
-
-// IncludeLower indicates whether the lower bound should be included or not.
-// Defaults to true.
-func (q *RangeQuery) IncludeLower(includeLower bool) *RangeQuery {
- q.includeLower = includeLower
- return q
-}
-
-// IncludeUpper indicates whether the upper bound should be included or not.
-// Defaults to true.
-func (q *RangeQuery) IncludeUpper(includeUpper bool) *RangeQuery {
- q.includeUpper = includeUpper
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *RangeQuery) Boost(boost float64) *RangeQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name for the filter that can be used when
-// searching for matched_filters per hit.
-func (q *RangeQuery) QueryName(queryName string) *RangeQuery {
- q.queryName = queryName
- return q
-}
-
-// TimeZone is used for date fields. In that case, we can adjust the
-// from/to fields using a timezone.
-func (q *RangeQuery) TimeZone(timeZone string) *RangeQuery {
- q.timeZone = timeZone
- return q
-}
-
-// Format is used for date fields. In that case, we can set the format
-// to be used instead of the mapper format.
-func (q *RangeQuery) Format(format string) *RangeQuery {
- q.format = format
- return q
-}
-
-// Source returns JSON for the query.
-func (q *RangeQuery) Source() (interface{}, error) {
- source := make(map[string]interface{})
-
- rangeQ := make(map[string]interface{})
- source["range"] = rangeQ
-
- params := make(map[string]interface{})
- rangeQ[q.name] = params
-
- params["from"] = q.from
- params["to"] = q.to
- if q.timeZone != "" {
- params["time_zone"] = q.timeZone
- }
- if q.format != "" {
- params["format"] = q.format
- }
- if q.boost != nil {
- params["boost"] = *q.boost
- }
- params["include_lower"] = q.includeLower
- params["include_upper"] = q.includeUpper
-
- if q.queryName != "" {
- rangeQ["_name"] = q.queryName
- }
-
- return source, nil
-}
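
A sketch of the half-open range the Gte/Lt helpers produce, with an explicit date format; the field name and dates are made up, and the snippet is not part of the vendored file.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// 2012/01/01 <= born < now; Format overrides the mapping's date format.
	q := elastic.NewRangeQuery("born").
		Gte("2012/01/01").
		Lt("now").
		Format("yyyy/MM/dd")

	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}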
diff --git a/vendor/github.com/olivere/elastic/search_queries_range_test.go b/vendor/github.com/olivere/elastic/search_queries_range_test.go
deleted file mode 100644
index 86d018a86..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_range_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestRangeQuery(t *testing.T) {
- q := NewRangeQuery("postDate").From("2010-03-01").To("2010-04-01").Boost(3)
- q = q.QueryName("my_query")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"range":{"_name":"my_query","postDate":{"boost":3,"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestRangeQueryWithTimeZone(t *testing.T) {
- q := NewRangeQuery("born").
- Gte("2012-01-01").
- Lte("now").
- TimeZone("+1:00")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"range":{"born":{"from":"2012-01-01","include_lower":true,"include_upper":true,"time_zone":"+1:00","to":"now"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestRangeQueryWithFormat(t *testing.T) {
- q := NewRangeQuery("born").
- Gte("2012/01/01").
- Lte("now").
- Format("yyyy/MM/dd")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"range":{"born":{"format":"yyyy/MM/dd","from":"2012/01/01","include_lower":true,"include_upper":true,"to":"now"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_raw_string.go b/vendor/github.com/olivere/elastic/search_queries_raw_string.go
deleted file mode 100644
index 3f9685c41..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_raw_string.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2012-present Oliver Eilhard, John Stanford. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "encoding/json"
-
-// RawStringQuery can be used to treat a string representation of an ES query
-// as a Query. Example usage:
-// q := RawStringQuery("{\"match_all\":{}}")
-// db.Search().Query(q).From(1).Size(100).Do()
-type RawStringQuery string
-
-// NewRawStringQuery initializes a new RawStringQuery.
-// It is the same as RawStringQuery(q).
-func NewRawStringQuery(q string) RawStringQuery {
- return RawStringQuery(q)
-}
-
-// Source returns the JSON encoded body
-func (q RawStringQuery) Source() (interface{}, error) {
- var f interface{}
- err := json.Unmarshal([]byte(q), &f)
- return f, err
-}
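
Because Source() simply unmarshals the wrapped string, invalid JSON surfaces as an error at serialization time. A brief sketch (not part of the vendored file; the term query fragment is arbitrary):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Wrap a hand-written JSON fragment as a Query.
	q := elastic.NewRawStringQuery(`{"term":{"user":"olivere"}}`)

	src, err := q.Source() // fails here if the JSON is malformed
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b)) // prints the same JSON, re-encoded
}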
diff --git a/vendor/github.com/olivere/elastic/search_queries_raw_string_test.go b/vendor/github.com/olivere/elastic/search_queries_raw_string_test.go
deleted file mode 100644
index 5bb3dac41..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_raw_string_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestRawStringQuery(t *testing.T) {
- q := RawStringQuery(`{"match_all":{}}`)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"match_all":{}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestNewRawStringQuery(t *testing.T) {
- q := NewRawStringQuery(`{"match_all":{}}`)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"match_all":{}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_regexp.go b/vendor/github.com/olivere/elastic/search_queries_regexp.go
deleted file mode 100644
index a08b533cb..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_regexp.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// RegexpQuery allows you to use regular expression term queries.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-regexp-query.html
-type RegexpQuery struct {
- name string
- regexp string
- flags string
- boost *float64
- rewrite string
- queryName string
- maxDeterminizedStates *int
-}
-
-// NewRegexpQuery creates and initializes a new RegexpQuery.
-func NewRegexpQuery(name string, regexp string) *RegexpQuery {
- return &RegexpQuery{name: name, regexp: regexp}
-}
-
-// Flags sets the regexp flags.
-func (q *RegexpQuery) Flags(flags string) *RegexpQuery {
- q.flags = flags
- return q
-}
-
-// MaxDeterminizedStates protects against complex regular expressions.
-func (q *RegexpQuery) MaxDeterminizedStates(maxDeterminizedStates int) *RegexpQuery {
- q.maxDeterminizedStates = &maxDeterminizedStates
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *RegexpQuery) Boost(boost float64) *RegexpQuery {
- q.boost = &boost
- return q
-}
-
-func (q *RegexpQuery) Rewrite(rewrite string) *RegexpQuery {
- q.rewrite = rewrite
- return q
-}
-
-// QueryName sets the query name for the filter that can be used
-// when searching for matched_filters per hit
-func (q *RegexpQuery) QueryName(queryName string) *RegexpQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns the JSON-serializable query data.
-func (q *RegexpQuery) Source() (interface{}, error) {
- source := make(map[string]interface{})
- query := make(map[string]interface{})
- source["regexp"] = query
-
- x := make(map[string]interface{})
- x["value"] = q.regexp
- if q.flags != "" {
- x["flags"] = q.flags
- }
- if q.maxDeterminizedStates != nil {
- x["max_determinized_states"] = *q.maxDeterminizedStates
- }
- if q.boost != nil {
- x["boost"] = *q.boost
- }
- if q.rewrite != "" {
- x["rewrite"] = q.rewrite
- }
- if q.queryName != "" {
- x["name"] = q.queryName
- }
- query[q.name] = x
-
- return source, nil
-}
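
A sketch combining the options above; the field name and pattern are illustrative, and MaxDeterminizedStates is set only to show how to cap automaton growth for expensive patterns. Not part of the vendored file.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	q := elastic.NewRegexpQuery("name.first", "s.*y").
		Flags("INTERSECTION|COMPLEMENT|EMPTY").
		MaxDeterminizedStates(20000).
		Boost(1.2)

	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}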
diff --git a/vendor/github.com/olivere/elastic/search_queries_regexp_test.go b/vendor/github.com/olivere/elastic/search_queries_regexp_test.go
deleted file mode 100644
index d30c0a36d..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_regexp_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestRegexpQuery(t *testing.T) {
- q := NewRegexpQuery("name.first", "s.*y")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"regexp":{"name.first":{"value":"s.*y"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestRegexpQueryWithOptions(t *testing.T) {
- q := NewRegexpQuery("name.first", "s.*y").
- Boost(1.2).
- Flags("INTERSECTION|COMPLEMENT|EMPTY").
- QueryName("my_query_name")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"regexp":{"name.first":{"boost":1.2,"flags":"INTERSECTION|COMPLEMENT|EMPTY","name":"my_query_name","value":"s.*y"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_script.go b/vendor/github.com/olivere/elastic/search_queries_script.go
deleted file mode 100644
index d430f4c8f..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_script.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "errors"
-
-// ScriptQuery allows defining scripts as filters.
-//
-// For details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-script-query.html
-type ScriptQuery struct {
- script *Script
- queryName string
-}
-
-// NewScriptQuery creates and initializes a new ScriptQuery.
-func NewScriptQuery(script *Script) *ScriptQuery {
- return &ScriptQuery{
- script: script,
- }
-}
-
-// QueryName sets the query name for the filter that can be used
-// when searching for matched_filters per hit
-func (q *ScriptQuery) QueryName(queryName string) *ScriptQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the query.
-func (q *ScriptQuery) Source() (interface{}, error) {
- if q.script == nil {
- return nil, errors.New("ScriptQuery expected a script")
- }
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["script"] = params
-
- src, err := q.script.Source()
- if err != nil {
- return nil, err
- }
- params["script"] = src
-
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
- return source, nil
-}
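
A sketch of a script filter; Source() errors when no script is set, so one is always provided. The Painless expression and field name are assumptions, and the snippet is not part of the vendored file.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Inline script acting as a filter clause.
	q := elastic.NewScriptQuery(elastic.NewScript("doc['num1'].value > 1")).
		QueryName("num1_gt_1")

	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(src)
	fmt.Println(string(b))
}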
diff --git a/vendor/github.com/olivere/elastic/search_queries_script_test.go b/vendor/github.com/olivere/elastic/search_queries_script_test.go
deleted file mode 100644
index 66ec106d5..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_script_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestScriptQuery(t *testing.T) {
- q := NewScriptQuery(NewScript("doc['num1'].value > 1"))
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"script":{"script":{"source":"doc['num1'.value \u003e 1"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestScriptQueryWithParams(t *testing.T) {
- q := NewScriptQuery(NewScript("doc['num1'].value > 1"))
- q = q.QueryName("MyQueryName")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"script":{"_name":"MyQueryName","script":{"source":"doc['num1'.value \u003e 1"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_simple_query_string.go b/vendor/github.com/olivere/elastic/search_queries_simple_query_string.go
deleted file mode 100644
index 462ea5533..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_simple_query_string.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "fmt"
- "strings"
-)
-
-// SimpleQueryStringQuery is a query that uses the SimpleQueryParser
-// to parse its content. Unlike the regular query_string query,
-// the simple_query_string query will never throw an exception,
-// and discards invalid parts of the query.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-simple-query-string-query.html
-type SimpleQueryStringQuery struct {
- queryText string
- analyzer string
- operator string
- fields []string
- fieldBoosts map[string]*float64
- minimumShouldMatch string
- flags string
- boost *float64
- lowercaseExpandedTerms *bool
- lenient *bool
- analyzeWildcard *bool
- locale string
- queryName string
-}
-
-// NewSimpleQueryStringQuery creates and initializes a new SimpleQueryStringQuery.
-func NewSimpleQueryStringQuery(text string) *SimpleQueryStringQuery {
- return &SimpleQueryStringQuery{
- queryText: text,
- fields: make([]string, 0),
- fieldBoosts: make(map[string]*float64),
- }
-}
-
-// Field adds a field to run the query against.
-func (q *SimpleQueryStringQuery) Field(field string) *SimpleQueryStringQuery {
- q.fields = append(q.fields, field)
- return q
-}
-
-// FieldWithBoost adds a field to run the query against with a specific boost.
-func (q *SimpleQueryStringQuery) FieldWithBoost(field string, boost float64) *SimpleQueryStringQuery {
- q.fields = append(q.fields, field)
- q.fieldBoosts[field] = &boost
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *SimpleQueryStringQuery) Boost(boost float64) *SimpleQueryStringQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name for the filter that can be used when
-// searching for matched_filters per hit.
-func (q *SimpleQueryStringQuery) QueryName(queryName string) *SimpleQueryStringQuery {
- q.queryName = queryName
- return q
-}
-
-// Analyzer specifies the analyzer to use for the query.
-func (q *SimpleQueryStringQuery) Analyzer(analyzer string) *SimpleQueryStringQuery {
- q.analyzer = analyzer
- return q
-}
-
-// DefaultOperator specifies the default operator for the query.
-func (q *SimpleQueryStringQuery) DefaultOperator(defaultOperator string) *SimpleQueryStringQuery {
- q.operator = defaultOperator
- return q
-}
-
-// Flags sets the flags for the query.
-func (q *SimpleQueryStringQuery) Flags(flags string) *SimpleQueryStringQuery {
- q.flags = flags
- return q
-}
-
-// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
-// and range queries are automatically lower-cased or not. Default is true.
-func (q *SimpleQueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *SimpleQueryStringQuery {
- q.lowercaseExpandedTerms = &lowercaseExpandedTerms
- return q
-}
-
-// Locale specifies the locale to be used for string conversions.
-func (q *SimpleQueryStringQuery) Locale(locale string) *SimpleQueryStringQuery {
- q.locale = locale
- return q
-}
-
-// Lenient indicates whether the query string parser should be lenient
-// when parsing field values. It defaults to the index setting and if not
-// set, defaults to false.
-func (q *SimpleQueryStringQuery) Lenient(lenient bool) *SimpleQueryStringQuery {
- q.lenient = &lenient
- return q
-}
-
-// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
-func (q *SimpleQueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *SimpleQueryStringQuery {
- q.analyzeWildcard = &analyzeWildcard
- return q
-}
-
-// MinimumShouldMatch specifies the minimum number or percentage of clauses
-// that must match for a document to be returned.
-func (q *SimpleQueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *SimpleQueryStringQuery {
- q.minimumShouldMatch = minimumShouldMatch
- return q
-}
-
-// Source returns JSON for the query.
-func (q *SimpleQueryStringQuery) Source() (interface{}, error) {
- // {
- // "simple_query_string" : {
- // "query" : "\"fried eggs\" +(eggplant | potato) -frittata",
- // "analyzer" : "snowball",
- // "fields" : ["body^5","_all"],
- // "default_operator" : "and"
- // }
- // }
-
- source := make(map[string]interface{})
-
- query := make(map[string]interface{})
- source["simple_query_string"] = query
-
- query["query"] = q.queryText
-
- if len(q.fields) > 0 {
- var fields []string
- for _, field := range q.fields {
- if boost, found := q.fieldBoosts[field]; found {
- if boost != nil {
- fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
- } else {
- fields = append(fields, field)
- }
- } else {
- fields = append(fields, field)
- }
- }
- query["fields"] = fields
- }
-
- if q.flags != "" {
- query["flags"] = q.flags
- }
- if q.analyzer != "" {
- query["analyzer"] = q.analyzer
- }
- if q.operator != "" {
- query["default_operator"] = strings.ToLower(q.operator)
- }
- if q.lowercaseExpandedTerms != nil {
- query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms
- }
- if q.lenient != nil {
- query["lenient"] = *q.lenient
- }
- if q.analyzeWildcard != nil {
- query["analyze_wildcard"] = *q.analyzeWildcard
- }
- if q.locale != "" {
- query["locale"] = q.locale
- }
- if q.queryName != "" {
- query["_name"] = q.queryName
- }
- if q.minimumShouldMatch != "" {
- query["minimum_should_match"] = q.minimumShouldMatch
- }
- if q.boost != nil {
- query["boost"] = *q.boost
- }
-
- return source, nil
-}
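
For orientation, a minimal sketch of how the removed SimpleQueryStringQuery was typically driven through the client API; the Elasticsearch 6.x node at the default http://127.0.0.1:9200, the "twitter" index, and the field names are illustrative assumptions, not part of the vendored code.

package elastic_test

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

func ExampleSimpleQueryStringQuery_usage() {
	// Assumes a reachable Elasticsearch 6.x node at the default URL.
	client, err := elastic.NewClient()
	if err != nil {
		panic(err)
	}
	// Invalid parts of the query text are dropped instead of raising an error.
	q := elastic.NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato) -frittata`).
		Field("message").
		FieldWithBoost("title", 2.0).
		DefaultOperator("and")
	res, err := client.Search().Index("twitter").Query(q).Do(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Printf("hits: %d\n", res.Hits.TotalHits)
}
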
diff --git a/vendor/github.com/olivere/elastic/search_queries_simple_query_string_test.go b/vendor/github.com/olivere/elastic/search_queries_simple_query_string_test.go
deleted file mode 100644
index ea4a341ec..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_simple_query_string_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "testing"
-)
-
-func TestSimpleQueryStringQuery(t *testing.T) {
- q := NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato) -frittata`)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"simple_query_string":{"query":"\"fried eggs\" +(eggplant | potato) -frittata"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSimpleQueryStringQueryExec(t *testing.T) {
- // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Match all should return all documents
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(NewSimpleQueryStringQuery("+Golang +Elasticsearch")).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 1 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
- }
- if len(searchResult.Hits.Hits) != 1 {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
- }
-
- for _, hit := range searchResult.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_slice.go b/vendor/github.com/olivere/elastic/search_queries_slice.go
deleted file mode 100644
index e1b1db928..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_slice.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// SliceQuery allows partitioning the documents into several slices.
-// It is used e.g. to slice scroll operations in Elasticsearch 5.0 or later.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#sliced-scroll
-// for details.
-type SliceQuery struct {
- field string
- id *int
- max *int
-}
-
-// NewSliceQuery creates a new SliceQuery.
-func NewSliceQuery() *SliceQuery {
- return &SliceQuery{}
-}
-
-// Field is the name of the field to slice against (_uid by default).
-func (s *SliceQuery) Field(field string) *SliceQuery {
- s.field = field
- return s
-}
-
-// Id is the id of the slice.
-func (s *SliceQuery) Id(id int) *SliceQuery {
- s.id = &id
- return s
-}
-
-// Max is the maximum number of slices.
-func (s *SliceQuery) Max(max int) *SliceQuery {
- s.max = &max
- return s
-}
-
-// Source returns the JSON body.
-func (s *SliceQuery) Source() (interface{}, error) {
- m := make(map[string]interface{})
- if s.field != "" {
- m["field"] = s.field
- }
- if s.id != nil {
- m["id"] = *s.id
- }
- if s.max != nil {
- m["max"] = *s.max
- }
- return m, nil
-}
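
A minimal sketch of slicing a scroll with the removed SliceQuery; the index name and page size are illustrative, and in practice each slice would run in its own worker. Subsequent Do calls on the same ScrollService continue the scroll until io.EOF.

package elastic_test

import (
	"context"
	"io"

	"github.com/olivere/elastic"
)

func ExampleSliceQuery_scroll() {
	client, err := elastic.NewClient() // assumes a local Elasticsearch 6.x node
	if err != nil {
		panic(err)
	}
	// First of two slices; a second worker would use Id(1).
	slice := elastic.NewSliceQuery().Id(0).Max(2)
	svc := client.Scroll("twitter").Slice(slice).Size(100)
	for {
		res, err := svc.Do(context.Background())
		if err == io.EOF {
			break // this slice is exhausted
		}
		if err != nil {
			panic(err)
		}
		_ = res.Hits.Hits // process this page of hits
	}
}
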
diff --git a/vendor/github.com/olivere/elastic/search_queries_slice_test.go b/vendor/github.com/olivere/elastic/search_queries_slice_test.go
deleted file mode 100644
index 0589f4e29..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_slice_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSliceQuery(t *testing.T) {
- q := NewSliceQuery().Field("date").Id(0).Max(2)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"field":"date","id":0,"max":2}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_term.go b/vendor/github.com/olivere/elastic/search_queries_term.go
deleted file mode 100644
index 9a445e0ec..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_term.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// TermQuery finds documents that contain the exact term specified
-// in the inverted index.
-//
-// For details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-term-query.html
-type TermQuery struct {
- name string
- value interface{}
- boost *float64
- queryName string
-}
-
-// NewTermQuery creates and initializes a new TermQuery.
-func NewTermQuery(name string, value interface{}) *TermQuery {
- return &TermQuery{name: name, value: value}
-}
-
-// Boost sets the boost for this query.
-func (q *TermQuery) Boost(boost float64) *TermQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name for the filter that can be used
-// when searching for matched_filters per hit
-func (q *TermQuery) QueryName(queryName string) *TermQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the query.
-func (q *TermQuery) Source() (interface{}, error) {
- // {"term":{"name":"value"}}
- source := make(map[string]interface{})
- tq := make(map[string]interface{})
- source["term"] = tq
-
- if q.boost == nil && q.queryName == "" {
- tq[q.name] = q.value
- } else {
- subQ := make(map[string]interface{})
- subQ["value"] = q.value
- if q.boost != nil {
- subQ["boost"] = *q.boost
- }
- if q.queryName != "" {
- subQ["_name"] = q.queryName
- }
- tq[q.name] = subQ
- }
- return source, nil
-}
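
A minimal sketch of the removed TermQuery in a filter context; the index and field names are illustrative.

package elastic_test

import (
	"context"

	"github.com/olivere/elastic"
)

func ExampleTermQuery_usage() {
	client, err := elastic.NewClient()
	if err != nil {
		panic(err)
	}
	// Exact, non-analyzed match on a keyword field, named for matched_queries.
	q := elastic.NewTermQuery("user", "olivere").Boost(2.0).QueryName("by_user")
	_, err = client.Search().
		Index("twitter").
		Query(elastic.NewBoolQuery().Filter(q)).
		Do(context.Background())
	if err != nil {
		panic(err)
	}
}
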
diff --git a/vendor/github.com/olivere/elastic/search_queries_term_test.go b/vendor/github.com/olivere/elastic/search_queries_term_test.go
deleted file mode 100644
index f800fa954..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_term_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestTermQuery(t *testing.T) {
- q := NewTermQuery("user", "ki")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"term":{"user":"ki"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermQueryWithOptions(t *testing.T) {
- q := NewTermQuery("user", "ki")
- q = q.Boost(2.79)
- q = q.QueryName("my_tq")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"term":{"user":{"_name":"my_tq","boost":2.79,"value":"ki"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_terms.go b/vendor/github.com/olivere/elastic/search_queries_terms.go
deleted file mode 100644
index 3649576dc..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_terms.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// TermsQuery filters documents that have fields that match any
-// of the provided terms (not analyzed).
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-terms-query.html
-type TermsQuery struct {
- name string
- values []interface{}
- termsLookup *TermsLookup
- queryName string
- boost *float64
-}
-
-// NewTermsQuery creates and initializes a new TermsQuery.
-func NewTermsQuery(name string, values ...interface{}) *TermsQuery {
- q := &TermsQuery{
- name: name,
- values: make([]interface{}, 0),
- }
- if len(values) > 0 {
- q.values = append(q.values, values...)
- }
- return q
-}
-
-// TermsLookup adds terms lookup details to the query.
-func (q *TermsQuery) TermsLookup(lookup *TermsLookup) *TermsQuery {
- q.termsLookup = lookup
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *TermsQuery) Boost(boost float64) *TermsQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name for the filter that can be used
-// when searching for matched_filters per hit
-func (q *TermsQuery) QueryName(queryName string) *TermsQuery {
- q.queryName = queryName
- return q
-}
-
-// Source creates the query source for the terms query.
-func (q *TermsQuery) Source() (interface{}, error) {
- // {"terms":{"name":["value1","value2"]}}
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["terms"] = params
-
- if q.termsLookup != nil {
- src, err := q.termsLookup.Source()
- if err != nil {
- return nil, err
- }
- params[q.name] = src
- } else {
- params[q.name] = q.values
- if q.boost != nil {
- params["boost"] = *q.boost
- }
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
- }
-
- return source, nil
-}
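
A minimal sketch of the removed TermsQuery, including the terms-lookup form that pulls the value list from another document; the index, type, and field names are illustrative.

package elastic_test

import (
	"context"

	"github.com/olivere/elastic"
)

func ExampleTermsQuery_usage() {
	client, err := elastic.NewClient()
	if err != nil {
		panic(err)
	}
	// Inline list of terms.
	q := elastic.NewTermsQuery("user", "olivere", "sandrae")
	// Alternatively, look the terms up in another document.
	lookup := elastic.NewTermsQuery("user").
		TermsLookup(elastic.NewTermsLookup().Index("users").Type("user").Id("2").Path("followers"))
	_, err = client.Search().Index("twitter").Query(q).Do(context.Background())
	if err != nil {
		panic(err)
	}
	_ = lookup
}
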
diff --git a/vendor/github.com/olivere/elastic/search_queries_terms_set.go b/vendor/github.com/olivere/elastic/search_queries_terms_set.go
deleted file mode 100644
index be410a1a7..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_terms_set.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// TermsSetQuery returns any documents that match with at least
-// one or more of the provided terms. The terms are not analyzed
-// and thus must match exactly. The number of terms that must
-// match varies per document and is either controlled by a
-// minimum should match field or computed per document in a
-// minimum should match script.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.1/query-dsl-terms-set-query.html
-type TermsSetQuery struct {
- name string
- values []interface{}
- minimumShouldMatchField string
- minimumShouldMatchScript *Script
- queryName string
- boost *float64
-}
-
-// NewTermsSetQuery creates and initializes a new TermsSetQuery.
-func NewTermsSetQuery(name string, values ...interface{}) *TermsSetQuery {
- q := &TermsSetQuery{
- name: name,
- }
- if len(values) > 0 {
- q.values = append(q.values, values...)
- }
- return q
-}
-
-// MinimumShouldMatchField specifies the field to match.
-func (q *TermsSetQuery) MinimumShouldMatchField(minimumShouldMatchField string) *TermsSetQuery {
- q.minimumShouldMatchField = minimumShouldMatchField
- return q
-}
-
-// MinimumShouldMatchScript specifies the script to match.
-func (q *TermsSetQuery) MinimumShouldMatchScript(minimumShouldMatchScript *Script) *TermsSetQuery {
- q.minimumShouldMatchScript = minimumShouldMatchScript
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *TermsSetQuery) Boost(boost float64) *TermsSetQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name for the filter that can be used
-// when searching for matched_filters per hit
-func (q *TermsSetQuery) QueryName(queryName string) *TermsSetQuery {
- q.queryName = queryName
- return q
-}
-
-// Source creates the query source for the term query.
-func (q *TermsSetQuery) Source() (interface{}, error) {
- // {"terms_set":{"codes":{"terms":["abc","def"],"minimum_should_match_field":"required_matches"}}}
- source := make(map[string]interface{})
- inner := make(map[string]interface{})
- params := make(map[string]interface{})
- inner[q.name] = params
- source["terms_set"] = inner
-
- // terms
- params["terms"] = q.values
-
- // minimum_should_match_field
- if match := q.minimumShouldMatchField; match != "" {
- params["minimum_should_match_field"] = match
- }
-
- // minimum_should_match_script
- if match := q.minimumShouldMatchScript; match != nil {
- src, err := match.Source()
- if err != nil {
- return nil, err
- }
- params["minimum_should_match_script"] = src
- }
-
- // Common parameters for all queries
- if q.boost != nil {
- params["boost"] = *q.boost
- }
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
-
- return source, nil
-}
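
A minimal sketch of the removed TermsSetQuery, where the per-document field "required_matches" controls how many of the provided terms must match; all names are illustrative.

package elastic_test

import (
	"context"

	"github.com/olivere/elastic"
)

func ExampleTermsSetQuery_usage() {
	client, err := elastic.NewClient()
	if err != nil {
		panic(err)
	}
	q := elastic.NewTermsSetQuery("codes", "abc", "def", "ghi").
		MinimumShouldMatchField("required_matches")
	_, err = client.Search().Index("twitter").Query(q).Do(context.Background())
	if err != nil {
		panic(err)
	}
}
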
diff --git a/vendor/github.com/olivere/elastic/search_queries_terms_set_test.go b/vendor/github.com/olivere/elastic/search_queries_terms_set_test.go
deleted file mode 100644
index e13fbfb2f..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_terms_set_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "testing"
-)
-
-func TestTermsSetQueryWithField(t *testing.T) {
- q := NewTermsSetQuery("codes", "abc", "def", "ghi").MinimumShouldMatchField("required_matches")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms_set":{"codes":{"minimum_should_match_field":"required_matches","terms":["abc","def","ghi"]}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermsSetQueryWithScript(t *testing.T) {
- q := NewTermsSetQuery("codes", "abc", "def", "ghi").
- MinimumShouldMatchScript(
- NewScript(`Math.min(params.num_terms, doc['required_matches'].value)`),
- )
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms_set":{"codes":{"minimum_should_match_script":{"source":"Math.min(params.num_terms, doc['required_matches'].value)"},"terms":["abc","def","ghi"]}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchTermsSetQuery(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(
- NewTermsSetQuery("user", "olivere", "sandrae").
- MinimumShouldMatchField("retweets"),
- ).
- Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if got, want := searchResult.Hits.TotalHits, int64(3); got != want {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
- }
- if got, want := len(searchResult.Hits.Hits), 3; got != want {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_terms_test.go b/vendor/github.com/olivere/elastic/search_queries_terms_test.go
deleted file mode 100644
index 72f472d17..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_terms_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestTermsQuery(t *testing.T) {
- q := NewTermsQuery("user", "ki")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms":{"user":["ki"]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermsQueryWithEmptyArray(t *testing.T) {
- included := make([]interface{}, 0)
- q := NewTermsQuery("tags", included...)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms":{"tags":[]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermsQueryWithTermsLookup(t *testing.T) {
- q := NewTermsQuery("user").
- TermsLookup(NewTermsLookup().Index("users").Type("user").Id("2").Path("followers"))
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms":{"user":{"id":"2","index":"users","path":"followers","type":"user"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermQuerysWithOptions(t *testing.T) {
- q := NewTermsQuery("user", "ki", "ko")
- q = q.Boost(2.79)
- q = q.QueryName("my_tq")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms":{"_name":"my_tq","boost":2.79,"user":["ki","ko"]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_type.go b/vendor/github.com/olivere/elastic/search_queries_type.go
deleted file mode 100644
index e7aef30df..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_type.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// TypeQuery filters documents matching the provided document / mapping type.
-//
-// For details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-type-query.html
-type TypeQuery struct {
- typ string
-}
-
-// NewTypeQuery creates and initializes a new TypeQuery.
-func NewTypeQuery(typ string) *TypeQuery {
- return &TypeQuery{typ: typ}
-}
-
-// Source returns JSON for the query.
-func (q *TypeQuery) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["type"] = params
- params["value"] = q.typ
- return source, nil
-}
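
A minimal sketch of the removed TypeQuery used as a filter to restrict a search to one mapping type; the "doc" type and index name are illustrative.

package elastic_test

import (
	"context"

	"github.com/olivere/elastic"
)

func ExampleTypeQuery_usage() {
	client, err := elastic.NewClient()
	if err != nil {
		panic(err)
	}
	q := elastic.NewBoolQuery().
		Must(elastic.NewMatchQuery("message", "golang")).
		Filter(elastic.NewTypeQuery("doc"))
	_, err = client.Search().Index("twitter").Query(q).Do(context.Background())
	if err != nil {
		panic(err)
	}
}
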
diff --git a/vendor/github.com/olivere/elastic/search_queries_type_test.go b/vendor/github.com/olivere/elastic/search_queries_type_test.go
deleted file mode 100644
index 176b82abb..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_type_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestTypeQuery(t *testing.T) {
- q := NewTypeQuery("my_type")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"type":{"value":"my_type"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_wildcard.go b/vendor/github.com/olivere/elastic/search_queries_wildcard.go
deleted file mode 100644
index ea8a0901c..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_wildcard.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// WildcardQuery matches documents that have fields matching a wildcard
-// expression (not analyzed). Supported wildcards are *, which matches
-// any character sequence (including the empty one), and ?, which matches
-// any single character. Note this query can be slow, as it needs to iterate
-// over many terms. In order to prevent extremely slow wildcard queries,
-// a wildcard term should not start with one of the wildcards * or ?.
-// The wildcard query maps to Lucene WildcardQuery.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-wildcard-query.html
-type WildcardQuery struct {
- name string
- wildcard string
- boost *float64
- rewrite string
- queryName string
-}
-
-// NewWildcardQuery creates and initializes a new WildcardQuery.
-func NewWildcardQuery(name, wildcard string) *WildcardQuery {
- return &WildcardQuery{
- name: name,
- wildcard: wildcard,
- }
-}
-
-// Boost sets the boost for this query.
-func (q *WildcardQuery) Boost(boost float64) *WildcardQuery {
- q.boost = &boost
- return q
-}
-
-// Rewrite controls how the wildcard query is rewritten into a primitive Lucene query.
-func (q *WildcardQuery) Rewrite(rewrite string) *WildcardQuery {
- q.rewrite = rewrite
- return q
-}
-
-// QueryName sets the name of this query.
-func (q *WildcardQuery) QueryName(queryName string) *WildcardQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns the JSON serializable body of this query.
-func (q *WildcardQuery) Source() (interface{}, error) {
- // {
- // "wildcard" : {
- // "user" : {
- // "wildcard" : "ki*y",
- // "boost" : 1.0
- // }
-	//   }
-	// }
-
- source := make(map[string]interface{})
-
- query := make(map[string]interface{})
- source["wildcard"] = query
-
- wq := make(map[string]interface{})
- query[q.name] = wq
-
- wq["wildcard"] = q.wildcard
-
- if q.boost != nil {
- wq["boost"] = *q.boost
- }
- if q.rewrite != "" {
- wq["rewrite"] = q.rewrite
- }
- if q.queryName != "" {
- wq["_name"] = q.queryName
- }
-
- return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/search_queries_wildcard_test.go b/vendor/github.com/olivere/elastic/search_queries_wildcard_test.go
deleted file mode 100644
index b41c8ab7b..000000000
--- a/vendor/github.com/olivere/elastic/search_queries_wildcard_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic_test
-
-import (
- "context"
- "encoding/json"
- "testing"
-
- "github.com/olivere/elastic"
-)
-
-func ExampleWildcardQuery() {
- // Get a client to the local Elasticsearch instance.
- client, err := elastic.NewClient()
- if err != nil {
- // Handle error
- panic(err)
- }
-
- // Define wildcard query
- q := elastic.NewWildcardQuery("user", "oli*er?").Boost(1.2)
- searchResult, err := client.Search().
- Index("twitter"). // search in index "twitter"
- Query(q). // use wildcard query defined above
- Do(context.TODO()) // execute
- if err != nil {
- // Handle error
- panic(err)
- }
- _ = searchResult
-}
-
-func TestWildcardQuery(t *testing.T) {
- q := elastic.NewWildcardQuery("user", "ki*y??")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"wildcard":{"user":{"wildcard":"ki*y??"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestWildcardQueryWithBoost(t *testing.T) {
- q := elastic.NewWildcardQuery("user", "ki*y??").Boost(1.2)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"wildcard":{"user":{"boost":1.2,"wildcard":"ki*y??"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_request.go b/vendor/github.com/olivere/elastic/search_request.go
deleted file mode 100644
index 8f08e73ff..000000000
--- a/vendor/github.com/olivere/elastic/search_request.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "strings"
-)
-
-// SearchRequest combines a search request and its
-// query details (see SearchSource).
-// It is used in combination with MultiSearch.
-type SearchRequest struct {
- searchType string
- indices []string
- types []string
- routing *string
- preference *string
- requestCache *bool
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
- scroll string
- source interface{}
-}
-
-// NewSearchRequest creates a new search request.
-func NewSearchRequest() *SearchRequest {
- return &SearchRequest{}
-}
-
-// SearchType must be one of "dfs_query_then_fetch" or
-// "query_then_fetch".
-func (r *SearchRequest) SearchType(searchType string) *SearchRequest {
- r.searchType = searchType
- return r
-}
-
-// SearchTypeDfsQueryThenFetch sets search type to dfs_query_then_fetch.
-func (r *SearchRequest) SearchTypeDfsQueryThenFetch() *SearchRequest {
- return r.SearchType("dfs_query_then_fetch")
-}
-
-// SearchTypeQueryThenFetch sets search type to query_then_fetch.
-func (r *SearchRequest) SearchTypeQueryThenFetch() *SearchRequest {
- return r.SearchType("query_then_fetch")
-}
-
-func (r *SearchRequest) Index(indices ...string) *SearchRequest {
- r.indices = append(r.indices, indices...)
- return r
-}
-
-func (r *SearchRequest) HasIndices() bool {
- return len(r.indices) > 0
-}
-
-func (r *SearchRequest) Type(types ...string) *SearchRequest {
- r.types = append(r.types, types...)
- return r
-}
-
-func (r *SearchRequest) Routing(routing string) *SearchRequest {
- r.routing = &routing
- return r
-}
-
-// Routings sets multiple routing values; they are joined with a comma.
-func (r *SearchRequest) Routings(routings ...string) *SearchRequest {
- if routings != nil {
- routings := strings.Join(routings, ",")
- r.routing = &routings
- } else {
- r.routing = nil
- }
- return r
-}
-
-func (r *SearchRequest) Preference(preference string) *SearchRequest {
- r.preference = &preference
- return r
-}
-
-func (r *SearchRequest) RequestCache(requestCache bool) *SearchRequest {
- r.requestCache = &requestCache
- return r
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *SearchRequest) IgnoreUnavailable(ignoreUnavailable bool) *SearchRequest {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified).
-func (s *SearchRequest) AllowNoIndices(allowNoIndices bool) *SearchRequest {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *SearchRequest) ExpandWildcards(expandWildcards string) *SearchRequest {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// Scroll specifies the scroll keep-alive time for the request, e.g. "1m".
-func (r *SearchRequest) Scroll(scroll string) *SearchRequest {
- r.scroll = scroll
- return r
-}
-
-// SearchSource sets the body of the request from a SearchSource builder.
-func (r *SearchRequest) SearchSource(searchSource *SearchSource) *SearchRequest {
-	return r.Source(searchSource)
-}
-
-// Source sets the body of the request. It may be a *SearchSource, a string,
-// a json.RawMessage, or any value that serializes to JSON.
-func (r *SearchRequest) Source(source interface{}) *SearchRequest {
- r.source = source
- return r
-}
-
-// header is used e.g. by MultiSearch to get information about the search header
-// of one SearchRequest.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-multi-search.html
-func (r *SearchRequest) header() interface{} {
- h := make(map[string]interface{})
- if r.searchType != "" {
- h["search_type"] = r.searchType
- }
-
- switch len(r.indices) {
- case 0:
- case 1:
- h["index"] = r.indices[0]
- default:
- h["indices"] = r.indices
- }
-
- switch len(r.types) {
- case 0:
- case 1:
- h["type"] = r.types[0]
- default:
- h["types"] = r.types
- }
-
- if r.routing != nil && *r.routing != "" {
- h["routing"] = *r.routing
- }
- if r.preference != nil && *r.preference != "" {
- h["preference"] = *r.preference
- }
- if r.requestCache != nil {
- h["request_cache"] = *r.requestCache
- }
- if r.ignoreUnavailable != nil {
- h["ignore_unavailable"] = *r.ignoreUnavailable
- }
- if r.allowNoIndices != nil {
- h["allow_no_indices"] = *r.allowNoIndices
- }
- if r.expandWildcards != "" {
- h["expand_wildcards"] = r.expandWildcards
- }
- if r.scroll != "" {
- h["scroll"] = r.scroll
- }
-
- return h
-}
-
-// Body allows access to the search body of the request, as generated by the DSL.
-// Notice that Body is read-only. You must not change the request body.
-//
-// Body is used e.g. by MultiSearch to get information about the search body
-// of one SearchRequest.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-multi-search.html
-func (r *SearchRequest) Body() (string, error) {
- switch t := r.source.(type) {
- default:
- body, err := json.Marshal(r.source)
- if err != nil {
- return "", err
- }
- return string(body), nil
- case *SearchSource:
- src, err := t.Source()
- if err != nil {
- return "", err
- }
- body, err := json.Marshal(src)
- if err != nil {
- return "", err
- }
- return string(body), nil
- case json.RawMessage:
- return string(t), nil
- case *json.RawMessage:
- return string(*t), nil
- case string:
- return t, nil
- case *string:
- if t != nil {
- return *t, nil
- }
- return "{}", nil
- }
-}
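
A minimal sketch of the removed SearchRequest as used with the Multi Search API, where each request contributes one header line and one body line; the index and field names are illustrative.

package elastic_test

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

func ExampleSearchRequest_multiSearch() {
	client, err := elastic.NewClient()
	if err != nil {
		panic(err)
	}
	req1 := elastic.NewSearchRequest().Index("twitter").
		Source(elastic.NewSearchSource().Query(elastic.NewTermQuery("user", "olivere")))
	req2 := elastic.NewSearchRequest().Index("twitter").
		Source(elastic.NewSearchSource().Query(elastic.NewMatchAllQuery()))
	res, err := client.MultiSearch().Add(req1, req2).Do(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Printf("responses: %d\n", len(res.Responses)) // one SearchResult per request
}
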
diff --git a/vendor/github.com/olivere/elastic/search_request_test.go b/vendor/github.com/olivere/elastic/search_request_test.go
deleted file mode 100644
index fa03af2c8..000000000
--- a/vendor/github.com/olivere/elastic/search_request_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- _ "net/http"
- "testing"
-)
-
-func TestSearchRequestIndex(t *testing.T) {
- builder := NewSearchRequest().Index("test")
- data, err := json.Marshal(builder.header())
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"index":"test"}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchRequestIndices(t *testing.T) {
- builder := NewSearchRequest().Index("test", "test2")
- data, err := json.Marshal(builder.header())
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"indices":["test","test2"]}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchRequestHasIndices(t *testing.T) {
- builder := NewSearchRequest()
- if builder.HasIndices() {
-		t.Errorf("expected HasIndices to return false; got %v", builder.HasIndices())
- }
- builder = builder.Index("test", "test2")
- if !builder.HasIndices() {
-		t.Errorf("expected HasIndices to return true; got %v", builder.HasIndices())
- }
-}
-
-func TestSearchRequestIgnoreUnavailable(t *testing.T) {
- builder := NewSearchRequest().Index("test").IgnoreUnavailable(true)
- data, err := json.Marshal(builder.header())
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"ignore_unavailable":true,"index":"test"}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_source.go b/vendor/github.com/olivere/elastic/search_source.go
deleted file mode 100644
index 77b1c5093..000000000
--- a/vendor/github.com/olivere/elastic/search_source.go
+++ /dev/null
@@ -1,546 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "fmt"
-)
-
-// SearchSource enables users to build the search source.
-// It resembles the SearchSourceBuilder in Elasticsearch.
-type SearchSource struct {
- query Query
- postQuery Query
- sliceQuery Query
- from int
- size int
- explain *bool
- version *bool
- sorters []Sorter
- trackScores bool
- searchAfterSortValues []interface{}
- minScore *float64
- timeout string
- terminateAfter *int
- storedFieldNames []string
- docvalueFields []string
- scriptFields []*ScriptField
- fetchSourceContext *FetchSourceContext
- aggregations map[string]Aggregation
- highlight *Highlight
- globalSuggestText string
- suggesters []Suggester
- rescores []*Rescore
- defaultRescoreWindowSize *int
- indexBoosts map[string]float64
- stats []string
- innerHits map[string]*InnerHit
- collapse *CollapseBuilder
- profile bool
-}
-
-// NewSearchSource initializes a new SearchSource.
-func NewSearchSource() *SearchSource {
- return &SearchSource{
- from: -1,
- size: -1,
- trackScores: false,
- aggregations: make(map[string]Aggregation),
- indexBoosts: make(map[string]float64),
- innerHits: make(map[string]*InnerHit),
- }
-}
-
-// Query sets the query to use with this search source.
-func (s *SearchSource) Query(query Query) *SearchSource {
- s.query = query
- return s
-}
-
-// Profile specifies that this search source should activate the
-// Profile API for queries made on it.
-func (s *SearchSource) Profile(profile bool) *SearchSource {
- s.profile = profile
- return s
-}
-
-// PostFilter will be executed after the query has been executed and
-// only affects the search hits, not the aggregations.
-// This filter is always executed as the last filtering mechanism.
-func (s *SearchSource) PostFilter(postFilter Query) *SearchSource {
- s.postQuery = postFilter
- return s
-}
-
-// Slice allows partitioning the documents in multiple slices.
-// It is e.g. used to slice a scroll operation, supported in
-// Elasticsearch 5.0 or later.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#sliced-scroll
-// for details.
-func (s *SearchSource) Slice(sliceQuery Query) *SearchSource {
- s.sliceQuery = sliceQuery
- return s
-}
-
-// From index to start the search from. Defaults to 0.
-func (s *SearchSource) From(from int) *SearchSource {
- s.from = from
- return s
-}
-
-// Size is the number of search hits to return. Defaults to 10.
-func (s *SearchSource) Size(size int) *SearchSource {
- s.size = size
- return s
-}
-
-// MinScore sets the minimum score below which docs will be filtered out.
-func (s *SearchSource) MinScore(minScore float64) *SearchSource {
- s.minScore = &minScore
- return s
-}
-
-// Explain indicates whether each search hit should be returned with
-// an explanation of the hit (ranking).
-func (s *SearchSource) Explain(explain bool) *SearchSource {
- s.explain = &explain
- return s
-}
-
-// Version indicates whether each search hit should be returned with
-// a version associated to it.
-func (s *SearchSource) Version(version bool) *SearchSource {
- s.version = &version
- return s
-}
-
-// Timeout controls how long a search is allowed to take, e.g. "1s" or "500ms".
-func (s *SearchSource) Timeout(timeout string) *SearchSource {
- s.timeout = timeout
- return s
-}
-
-// TimeoutInMillis controls how many milliseconds a search is allowed
-// to take before it is canceled.
-func (s *SearchSource) TimeoutInMillis(timeoutInMillis int) *SearchSource {
- s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
- return s
-}
-
-// TerminateAfter allows the request to stop after the given number
-// of search hits are collected.
-func (s *SearchSource) TerminateAfter(terminateAfter int) *SearchSource {
- s.terminateAfter = &terminateAfter
- return s
-}
-
-// Sort adds a sort order.
-func (s *SearchSource) Sort(field string, ascending bool) *SearchSource {
- s.sorters = append(s.sorters, SortInfo{Field: field, Ascending: ascending})
- return s
-}
-
-// SortWithInfo adds a sort order.
-func (s *SearchSource) SortWithInfo(info SortInfo) *SearchSource {
- s.sorters = append(s.sorters, info)
- return s
-}
-
-// SortBy adds a sort order.
-func (s *SearchSource) SortBy(sorter ...Sorter) *SearchSource {
- s.sorters = append(s.sorters, sorter...)
- return s
-}
-
-func (s *SearchSource) hasSort() bool {
- return len(s.sorters) > 0
-}
-
-// TrackScores is applied when sorting and controls if scores will be
-// tracked as well. Defaults to false.
-func (s *SearchSource) TrackScores(trackScores bool) *SearchSource {
- s.trackScores = trackScores
- return s
-}
-
-// SearchAfter allows a different form of pagination by using a live cursor,
-// using the results of the previous page to help the retrieval of the next.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-search-after.html
-func (s *SearchSource) SearchAfter(sortValues ...interface{}) *SearchSource {
- s.searchAfterSortValues = append(s.searchAfterSortValues, sortValues...)
- return s
-}
-
-// Aggregation adds an aggregation to perform as part of the search.
-func (s *SearchSource) Aggregation(name string, aggregation Aggregation) *SearchSource {
- s.aggregations[name] = aggregation
- return s
-}
-
-// DefaultRescoreWindowSize sets the rescore window size for rescores
-// that don't specify their window.
-func (s *SearchSource) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchSource {
- s.defaultRescoreWindowSize = &defaultRescoreWindowSize
- return s
-}
-
-// Highlight adds highlighting to the search.
-func (s *SearchSource) Highlight(highlight *Highlight) *SearchSource {
- s.highlight = highlight
- return s
-}
-
-// Highlighter returns the highlighter.
-func (s *SearchSource) Highlighter() *Highlight {
- if s.highlight == nil {
- s.highlight = NewHighlight()
- }
- return s.highlight
-}
-
-// GlobalSuggestText defines the global text to use with all suggesters.
-// This avoids repetition.
-func (s *SearchSource) GlobalSuggestText(text string) *SearchSource {
- s.globalSuggestText = text
- return s
-}
-
-// Suggester adds a suggester to the search.
-func (s *SearchSource) Suggester(suggester Suggester) *SearchSource {
- s.suggesters = append(s.suggesters, suggester)
- return s
-}
-
-// Rescorer adds a rescorer to the search.
-func (s *SearchSource) Rescorer(rescore *Rescore) *SearchSource {
- s.rescores = append(s.rescores, rescore)
- return s
-}
-
-// ClearRescorers removes all rescorers from the search.
-func (s *SearchSource) ClearRescorers() *SearchSource {
- s.rescores = make([]*Rescore, 0)
- return s
-}
-
-// FetchSource indicates whether the response should contain the stored
-// _source for every hit.
-func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource {
- if s.fetchSourceContext == nil {
- s.fetchSourceContext = NewFetchSourceContext(fetchSource)
- } else {
- s.fetchSourceContext.SetFetchSource(fetchSource)
- }
- return s
-}
-
-// FetchSourceContext indicates how the _source should be fetched.
-func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchSource {
- s.fetchSourceContext = fetchSourceContext
- return s
-}
-
-// NoStoredFields indicates that no stored fields should be loaded, resulting
-// in only id and type to be returned per hit.
-func (s *SearchSource) NoStoredFields() *SearchSource {
- s.storedFieldNames = nil
- return s
-}
-
-// StoredField adds a single field to load and return (note, must be stored) as
-// part of the search request. If none are specified, the source of the
-// document will be returned.
-func (s *SearchSource) StoredField(storedFieldName string) *SearchSource {
- s.storedFieldNames = append(s.storedFieldNames, storedFieldName)
- return s
-}
-
-// StoredFields sets the fields to load and return as part of the search request.
-// If none are specified, the source of the document will be returned.
-func (s *SearchSource) StoredFields(storedFieldNames ...string) *SearchSource {
- s.storedFieldNames = append(s.storedFieldNames, storedFieldNames...)
- return s
-}
-
-// DocvalueField adds a single field to load from the field data cache
-// and return as part of the search request.
-func (s *SearchSource) DocvalueField(fieldDataField string) *SearchSource {
- s.docvalueFields = append(s.docvalueFields, fieldDataField)
- return s
-}
-
-// DocvalueFields adds one or more fields to load from the field data cache
-// and return as part of the search request.
-func (s *SearchSource) DocvalueFields(docvalueFields ...string) *SearchSource {
- s.docvalueFields = append(s.docvalueFields, docvalueFields...)
- return s
-}
-
-// ScriptField adds a single script field with the provided script.
-func (s *SearchSource) ScriptField(scriptField *ScriptField) *SearchSource {
- s.scriptFields = append(s.scriptFields, scriptField)
- return s
-}
-
-// ScriptFields adds one or more script fields with the provided scripts.
-func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource {
- s.scriptFields = append(s.scriptFields, scriptFields...)
- return s
-}
-
-// IndexBoost sets the boost that a specific index will receive when the
-// query is executed against it.
-func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource {
- s.indexBoosts[index] = boost
- return s
-}
-
-// Stats group this request will be aggregated under.
-func (s *SearchSource) Stats(statsGroup ...string) *SearchSource {
- s.stats = append(s.stats, statsGroup...)
- return s
-}
-
-// InnerHit adds an inner hit to return with the result.
-func (s *SearchSource) InnerHit(name string, innerHit *InnerHit) *SearchSource {
- s.innerHits[name] = innerHit
- return s
-}
-
-// Collapse adds field collapsing.
-func (s *SearchSource) Collapse(collapse *CollapseBuilder) *SearchSource {
- s.collapse = collapse
- return s
-}
-
-// Source returns the serializable JSON for the source builder.
-func (s *SearchSource) Source() (interface{}, error) {
- source := make(map[string]interface{})
-
- if s.from != -1 {
- source["from"] = s.from
- }
- if s.size != -1 {
- source["size"] = s.size
- }
- if s.timeout != "" {
- source["timeout"] = s.timeout
- }
- if s.terminateAfter != nil {
- source["terminate_after"] = *s.terminateAfter
- }
- if s.query != nil {
- src, err := s.query.Source()
- if err != nil {
- return nil, err
- }
- source["query"] = src
- }
- if s.postQuery != nil {
- src, err := s.postQuery.Source()
- if err != nil {
- return nil, err
- }
- source["post_filter"] = src
- }
- if s.sliceQuery != nil {
- src, err := s.sliceQuery.Source()
- if err != nil {
- return nil, err
- }
- source["slice"] = src
- }
- if s.minScore != nil {
- source["min_score"] = *s.minScore
- }
- if s.version != nil {
- source["version"] = *s.version
- }
- if s.explain != nil {
- source["explain"] = *s.explain
- }
- if s.profile {
- source["profile"] = s.profile
- }
- if s.collapse != nil {
- src, err := s.collapse.Source()
- if err != nil {
- return nil, err
- }
- source["collapse"] = src
- }
- if s.fetchSourceContext != nil {
- src, err := s.fetchSourceContext.Source()
- if err != nil {
- return nil, err
- }
- source["_source"] = src
- }
-
- if s.storedFieldNames != nil {
- switch len(s.storedFieldNames) {
- case 1:
- source["stored_fields"] = s.storedFieldNames[0]
- default:
- source["stored_fields"] = s.storedFieldNames
- }
- }
-
- if len(s.docvalueFields) > 0 {
- source["docvalue_fields"] = s.docvalueFields
- }
-
- if len(s.scriptFields) > 0 {
- sfmap := make(map[string]interface{})
- for _, scriptField := range s.scriptFields {
- src, err := scriptField.Source()
- if err != nil {
- return nil, err
- }
- sfmap[scriptField.FieldName] = src
- }
- source["script_fields"] = sfmap
- }
-
- if len(s.sorters) > 0 {
- var sortarr []interface{}
- for _, sorter := range s.sorters {
- src, err := sorter.Source()
- if err != nil {
- return nil, err
- }
- sortarr = append(sortarr, src)
- }
- source["sort"] = sortarr
- }
-
- if s.trackScores {
- source["track_scores"] = s.trackScores
- }
-
- if len(s.searchAfterSortValues) > 0 {
- source["search_after"] = s.searchAfterSortValues
- }
-
- if len(s.indexBoosts) > 0 {
- source["indices_boost"] = s.indexBoosts
- }
-
- if len(s.aggregations) > 0 {
- aggsMap := make(map[string]interface{})
- for name, aggregate := range s.aggregations {
- src, err := aggregate.Source()
- if err != nil {
- return nil, err
- }
- aggsMap[name] = src
- }
- source["aggregations"] = aggsMap
- }
-
- if s.highlight != nil {
- src, err := s.highlight.Source()
- if err != nil {
- return nil, err
- }
- source["highlight"] = src
- }
-
- if len(s.suggesters) > 0 {
- suggesters := make(map[string]interface{})
- for _, s := range s.suggesters {
- src, err := s.Source(false)
- if err != nil {
- return nil, err
- }
- suggesters[s.Name()] = src
- }
- if s.globalSuggestText != "" {
- suggesters["text"] = s.globalSuggestText
- }
- source["suggest"] = suggesters
- }
-
- if len(s.rescores) > 0 {
- // Strip empty rescores from request
- var rescores []*Rescore
- for _, r := range s.rescores {
- if !r.IsEmpty() {
- rescores = append(rescores, r)
- }
- }
-
- if len(rescores) == 1 {
- rescores[0].defaultRescoreWindowSize = s.defaultRescoreWindowSize
- src, err := rescores[0].Source()
- if err != nil {
- return nil, err
- }
- source["rescore"] = src
- } else {
- var slice []interface{}
- for _, r := range rescores {
- r.defaultRescoreWindowSize = s.defaultRescoreWindowSize
- src, err := r.Source()
- if err != nil {
- return nil, err
- }
- slice = append(slice, src)
- }
- source["rescore"] = slice
- }
- }
-
- if len(s.stats) > 0 {
- source["stats"] = s.stats
- }
-
- if len(s.innerHits) > 0 {
- // Top-level inner hits
- // See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html#top-level-inner-hits
- // "inner_hits": {
- // "<inner_hits_name>": {
- // "<path|type>": {
- // "<path-to-nested-object-field|child-or-parent-type>": {
- // <inner_hits_body>,
- // [,"inner_hits" : { [<sub_inner_hits>]+ } ]?
- // }
- // }
- // },
- // [,"<inner_hits_name_2>" : { ... } ]*
- // }
- m := make(map[string]interface{})
- for name, hit := range s.innerHits {
- if hit.path != "" {
- src, err := hit.Source()
- if err != nil {
- return nil, err
- }
- path := make(map[string]interface{})
- path[hit.path] = src
- m[name] = map[string]interface{}{
- "path": path,
- }
- } else if hit.typ != "" {
- src, err := hit.Source()
- if err != nil {
- return nil, err
- }
- typ := make(map[string]interface{})
- typ[hit.typ] = src
- m[name] = map[string]interface{}{
- "type": typ,
- }
- } else {
- // TODO the Java client throws here, because either path or typ must be specified
- _ = m
- }
- }
- source["inner_hits"] = m
- }
-
- return source, nil
-}
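
A minimal sketch of assembling the removed SearchSource builder and handing it to a search; the field and index names are illustrative.

package elastic_test

import (
	"context"

	"github.com/olivere/elastic"
)

func ExampleSearchSource_usage() {
	client, err := elastic.NewClient()
	if err != nil {
		panic(err)
	}
	ss := elastic.NewSearchSource().
		Query(elastic.NewMatchQuery("message", "golang")).
		PostFilter(elastic.NewTermQuery("user", "olivere")).
		Sort("created", false).
		From(0).Size(20).
		FetchSourceContext(elastic.NewFetchSourceContext(true).Include("user", "message", "created"))
	_, err = client.Search().Index("twitter").SearchSource(ss).Do(context.Background())
	if err != nil {
		panic(err)
	}
}
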
diff --git a/vendor/github.com/olivere/elastic/search_source_test.go b/vendor/github.com/olivere/elastic/search_source_test.go
deleted file mode 100644
index a78991bf0..000000000
--- a/vendor/github.com/olivere/elastic/search_source_test.go
+++ /dev/null
@@ -1,295 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSearchSourceMatchAllQuery(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceNoStoredFields(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ).NoStoredFields()
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceStoredFields(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ).StoredFields("message", "tags")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"query":{"match_all":{}},"stored_fields":["message","tags"]}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceFetchSourceDisabled(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ).FetchSource(false)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"_source":false,"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceFetchSourceByWildcards(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- fsc := NewFetchSourceContext(true).Include("obj1.*", "obj2.*").Exclude("*.description")
- builder := NewSearchSource().Query(matchAllQ).FetchSourceContext(fsc)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"_source":{"excludes":["*.description"],"includes":["obj1.*","obj2.*"]},"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceDocvalueFields(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ).DocvalueFields("test1", "test2")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"docvalue_fields":["test1","test2"],"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceScriptFields(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- sf1 := NewScriptField("test1", NewScript("doc['my_field_name'].value * 2"))
- sf2 := NewScriptField("test2", NewScript("doc['my_field_name'].value * factor").Param("factor", 3.1415927))
- builder := NewSearchSource().Query(matchAllQ).ScriptFields(sf1, sf2)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"query":{"match_all":{}},"script_fields":{"test1":{"script":{"source":"doc['my_field_name'].value * 2"}},"test2":{"script":{"params":{"factor":3.1415927},"source":"doc['my_field_name'].value * factor"}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourcePostFilter(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- pf := NewTermQuery("tag", "important")
- builder := NewSearchSource().Query(matchAllQ).PostFilter(pf)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"post_filter":{"term":{"tag":"important"}},"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceHighlight(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- hl := NewHighlight().Field("content")
- builder := NewSearchSource().Query(matchAllQ).Highlight(hl)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"highlight":{"fields":{"content":{}}},"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceRescoring(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- rescorerQuery := NewMatchPhraseQuery("field1", "the quick brown fox").Slop(2)
- rescorer := NewQueryRescorer(rescorerQuery)
- rescorer = rescorer.QueryWeight(0.7)
- rescorer = rescorer.RescoreQueryWeight(1.2)
- rescore := NewRescore().WindowSize(50).Rescorer(rescorer)
- builder := NewSearchSource().Query(matchAllQ).Rescorer(rescore)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"query":{"match_all":{}},"rescore":{"query":{"query_weight":0.7,"rescore_query":{"match_phrase":{"field1":{"query":"the quick brown fox","slop":2}}},"rescore_query_weight":1.2},"window_size":50}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceIndexBoost(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ).IndexBoost("index1", 1.4).IndexBoost("index2", 1.3)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"indices_boost":{"index1":1.4,"index2":1.3},"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceMixDifferentSorters(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ).
- Sort("a", false).
- SortWithInfo(SortInfo{Field: "b", Ascending: true}).
- SortBy(NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number"))
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"query":{"match_all":{}},"sort":[{"a":{"order":"desc"}},{"b":{"order":"asc"}},{"_script":{"order":"asc","script":{"params":{"factor":1.1},"source":"doc['field_name'].value * factor"},"type":"number"}}]}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceInnerHits(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ).
- InnerHit("comments", NewInnerHit().Type("comment").Query(NewMatchQuery("user", "olivere"))).
- InnerHit("views", NewInnerHit().Path("view"))
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"inner_hits":{"comments":{"type":{"comment":{"query":{"match":{"user":{"query":"olivere"}}}}}},"views":{"path":{"view":{}}}},"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceSearchAfter(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ).SearchAfter(1463538857, "tweet#654323")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"query":{"match_all":{}},"search_after":[1463538857,"tweet#654323"]}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceProfiledQuery(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ).Profile(true)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"profile":true,"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_suggester_test.go b/vendor/github.com/olivere/elastic/search_suggester_test.go
deleted file mode 100644
index 33bdc9275..000000000
--- a/vendor/github.com/olivere/elastic/search_suggester_test.go
+++ /dev/null
@@ -1,355 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestTermSuggester(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Match all should return all documents
- tsName := "my-suggestions"
- ts := NewTermSuggester(tsName)
- ts = ts.Text("Goolang")
- ts = ts.Field("message")
-
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(NewMatchAllQuery()).
- Suggester(ts).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Suggest == nil {
- t.Errorf("expected SearchResult.Suggest != nil; got nil")
- }
- mySuggestions, found := searchResult.Suggest[tsName]
- if !found {
- t.Errorf("expected to find SearchResult.Suggest[%s]; got false", tsName)
- }
- if mySuggestions == nil {
- t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", tsName)
- }
-
- if len(mySuggestions) != 1 {
- t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
- }
- mySuggestion := mySuggestions[0]
- if mySuggestion.Text != "goolang" {
- t.Errorf("expected Text = 'goolang'; got %s", mySuggestion.Text)
- }
- if mySuggestion.Offset != 0 {
- t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
- }
- if mySuggestion.Length != 7 {
- t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length)
- }
- if len(mySuggestion.Options) != 1 {
- t.Errorf("expected 1 option; got %d", len(mySuggestion.Options))
- }
- myOption := mySuggestion.Options[0]
- if myOption.Text != "golang" {
- t.Errorf("expected Text = 'golang'; got %s", myOption.Text)
- }
-}
-
-func TestPhraseSuggester(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Match all should return all documents
- phraseSuggesterName := "my-suggestions"
- ps := NewPhraseSuggester(phraseSuggesterName)
- ps = ps.Text("Goolang")
- ps = ps.Field("message")
-
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(NewMatchAllQuery()).
- Suggester(ps).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Suggest == nil {
- t.Errorf("expected SearchResult.Suggest != nil; got nil")
- }
- mySuggestions, found := searchResult.Suggest[phraseSuggesterName]
- if !found {
- t.Errorf("expected to find SearchResult.Suggest[%s]; got false", phraseSuggesterName)
- }
- if mySuggestions == nil {
- t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", phraseSuggesterName)
- }
-
- if len(mySuggestions) != 1 {
- t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
- }
- mySuggestion := mySuggestions[0]
- if mySuggestion.Text != "Goolang" {
- t.Errorf("expected Text = 'Goolang'; got %s", mySuggestion.Text)
- }
- if mySuggestion.Offset != 0 {
- t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
- }
- if mySuggestion.Length != 7 {
- t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length)
- }
- if want, have := 1, len(mySuggestion.Options); want != have {
- t.Errorf("expected len(options) = %d; got %d", want, have)
- }
- if want, have := "golang", mySuggestion.Options[0].Text; want != have {
- t.Errorf("expected options[0].Text = %q; got %q", want, have)
- }
- if score := mySuggestion.Options[0].Score; score <= 0.0 {
- t.Errorf("expected options[0].Score > 0.0; got %v", score)
- }
-}
-
-func TestCompletionSuggester(t *testing.T) {
- client := setupTestClientAndCreateIndex(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- tweet1 := tweet{
- User: "olivere",
- Message: "Welcome to Golang and Elasticsearch.",
- Suggest: NewSuggestField("Golang", "Elasticsearch"),
- }
- tweet2 := tweet{
- User: "olivere",
- Message: "Another unrelated topic.",
- Suggest: NewSuggestField("Another unrelated topic."),
- }
- tweet3 := tweet{
- User: "sandrae",
- Message: "Cycling is fun.",
- Suggest: NewSuggestField("Cycling is fun."),
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Match all should return all documents
- suggesterName := "my-suggestions"
- cs := NewCompletionSuggester(suggesterName)
- cs = cs.Text("Golang")
- cs = cs.Field("suggest_field")
-
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(NewMatchAllQuery()).
- Suggester(cs).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Suggest == nil {
- t.Errorf("expected SearchResult.Suggest != nil; got nil")
- }
- mySuggestions, found := searchResult.Suggest[suggesterName]
- if !found {
- t.Errorf("expected to find SearchResult.Suggest[%s]; got false", suggesterName)
- }
- if mySuggestions == nil {
- t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", suggesterName)
- }
-
- if len(mySuggestions) != 1 {
- t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
- }
- mySuggestion := mySuggestions[0]
- if mySuggestion.Text != "Golang" {
- t.Errorf("expected Text = 'Golang'; got %s", mySuggestion.Text)
- }
- if mySuggestion.Offset != 0 {
- t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
- }
- if mySuggestion.Length != 6 {
-		t.Errorf("expected Length = %d; got %d", 6, mySuggestion.Length)
- }
- if len(mySuggestion.Options) != 1 {
- t.Errorf("expected 1 option; got %d", len(mySuggestion.Options))
- }
- myOption := mySuggestion.Options[0]
- if myOption.Text != "Golang" {
- t.Errorf("expected Text = 'Golang'; got %s", myOption.Text)
- }
-}
-
-func TestContextSuggester(t *testing.T) {
- client := setupTestClientAndCreateIndex(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- // TODO make a nice way of creating tweets, as currently the context fields are unsupported as part of the suggestion fields
- tweet1 := `
- {
- "user":"olivere",
- "message":"Welcome to Golang and Elasticsearch.",
- "retweets":0,
- "created":"0001-01-01T00:00:00Z",
- "suggest_field":{
- "input":[
- "Golang",
- "Elasticsearch"
- ],
- "contexts":{
- "user_name": ["olivere"]
- }
- }
- }
- `
- tweet2 := `
- {
- "user":"sandrae",
- "message":"I like golfing",
- "retweets":0,
- "created":"0001-01-01T00:00:00Z",
- "suggest_field":{
- "input":[
- "Golfing"
- ],
- "contexts":{
- "user_name": ["sandrae"]
- }
- }
- }
- `
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyString(tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyString(tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- suggesterName := "my-suggestions"
- cs := NewContextSuggester(suggesterName)
- cs = cs.Prefix("Gol")
- cs = cs.Field("suggest_field")
- cs = cs.ContextQueries(
- NewSuggesterCategoryQuery("user_name", "olivere"),
- )
-
- searchResult, err := client.Search().
- Index(testIndexName).
- Suggester(cs).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Suggest == nil {
- t.Errorf("expected SearchResult.Suggest != nil; got nil")
- }
- mySuggestions, found := searchResult.Suggest[suggesterName]
- if !found {
- t.Errorf("expected to find SearchResult.Suggest[%s]; got false", suggesterName)
- }
- if mySuggestions == nil {
- t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", suggesterName)
- }
-
- // sandra's tweet is not returned because of the user_name context
- if len(mySuggestions) != 1 {
- t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
- }
- mySuggestion := mySuggestions[0]
- if mySuggestion.Text != "Gol" {
- t.Errorf("expected Text = 'Gol'; got %s", mySuggestion.Text)
- }
- if mySuggestion.Offset != 0 {
- t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
- }
- if mySuggestion.Length != 3 {
- t.Errorf("expected Length = %d; got %d", 3, mySuggestion.Length)
- }
- if len(mySuggestion.Options) != 1 {
- t.Errorf("expected 1 option; got %d", len(mySuggestion.Options))
- }
- myOption := mySuggestion.Options[0]
- if myOption.Text != "Golang" {
- t.Errorf("expected Text = 'Golang'; got %s", myOption.Text)
- }
- if myOption.Id != "1" {
- t.Errorf("expected Id = '1'; got %s", myOption.Id)
- }
-}
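Regarding the TODO at the top of TestContextSuggester about context fields being unsupported in the suggestion helper: a minimal sketch, as an illustration only, of a hypothetical struct (not a type provided by this package) that would serialize to the same suggest_field JSON the test builds by hand.

package main

import (
	"encoding/json"
	"fmt"
)

// contextSuggestField mirrors the raw suggest_field JSON used in
// TestContextSuggester above. It is a hypothetical helper, not part of elastic.
type contextSuggestField struct {
	Input    []string            `json:"input"`
	Contexts map[string][]string `json:"contexts,omitempty"`
}

func main() {
	b, _ := json.Marshal(contextSuggestField{
		Input:    []string{"Golang", "Elasticsearch"},
		Contexts: map[string][]string{"user_name": {"olivere"}},
	})
	// {"input":["Golang","Elasticsearch"],"contexts":{"user_name":["olivere"]}}
	fmt.Println(string(b))
}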
diff --git a/vendor/github.com/olivere/elastic/search_terms_lookup.go b/vendor/github.com/olivere/elastic/search_terms_lookup.go
deleted file mode 100644
index 9a2456bdd..000000000
--- a/vendor/github.com/olivere/elastic/search_terms_lookup.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// TermsLookup encapsulates the parameters needed to fetch terms.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-terms-query.html#query-dsl-terms-lookup.
-type TermsLookup struct {
- index string
- typ string
- id string
- path string
- routing string
-}
-
-// NewTermsLookup creates and initializes a new TermsLookup.
-func NewTermsLookup() *TermsLookup {
- t := &TermsLookup{}
- return t
-}
-
-// Index name.
-func (t *TermsLookup) Index(index string) *TermsLookup {
- t.index = index
- return t
-}
-
-// Type name.
-func (t *TermsLookup) Type(typ string) *TermsLookup {
- t.typ = typ
- return t
-}
-
-// Id to look up.
-func (t *TermsLookup) Id(id string) *TermsLookup {
- t.id = id
- return t
-}
-
-// Path to use for lookup.
-func (t *TermsLookup) Path(path string) *TermsLookup {
- t.path = path
- return t
-}
-
-// Routing value.
-func (t *TermsLookup) Routing(routing string) *TermsLookup {
- t.routing = routing
- return t
-}
-
-// Source creates the JSON source of the builder.
-func (t *TermsLookup) Source() (interface{}, error) {
- src := make(map[string]interface{})
- if t.index != "" {
- src["index"] = t.index
- }
- if t.typ != "" {
- src["type"] = t.typ
- }
- if t.id != "" {
- src["id"] = t.id
- }
- if t.path != "" {
- src["path"] = t.path
- }
- if t.routing != "" {
- src["routing"] = t.routing
- }
- return src, nil
-}
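For reference, a minimal sketch of how the lookup document produced by Source() is typically embedded under a field name inside a terms query body, following the Elasticsearch reference linked in the comment above. The query wrapper here is hand-built JSON for illustration and is not a claim about this client's TermsQuery API.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Build the lookup the same way the unit test below does.
	lookup := elastic.NewTermsLookup().Index("users").Type("user").Id("2").Path("followers")
	src, err := lookup.Source()
	if err != nil {
		panic(err)
	}
	// Nest the lookup under the field name of a terms query by hand,
	// matching the terms-lookup example in the Elasticsearch docs.
	body := map[string]interface{}{
		"query": map[string]interface{}{
			"terms": map[string]interface{}{
				"user": src,
			},
		},
	}
	data, _ := json.Marshal(body)
	// {"query":{"terms":{"user":{"id":"2","index":"users","path":"followers","type":"user"}}}}
	fmt.Println(string(data))
}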
diff --git a/vendor/github.com/olivere/elastic/search_terms_lookup_test.go b/vendor/github.com/olivere/elastic/search_terms_lookup_test.go
deleted file mode 100644
index 369f72346..000000000
--- a/vendor/github.com/olivere/elastic/search_terms_lookup_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestTermsLookup(t *testing.T) {
- tl := NewTermsLookup().Index("users").Type("user").Id("2").Path("followers")
- src, err := tl.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"id":"2","index":"users","path":"followers","type":"user"}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/search_test.go b/vendor/github.com/olivere/elastic/search_test.go
deleted file mode 100644
index 586089aaa..000000000
--- a/vendor/github.com/olivere/elastic/search_test.go
+++ /dev/null
@@ -1,1320 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "reflect"
- "testing"
- "time"
-)
-
-func TestSearchMatchAll(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(NewMatchAllQuery()).
- Size(100).
- Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if got, want := searchResult.Hits.TotalHits, int64(3); got != want {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
- }
- if got, want := len(searchResult.Hits.Hits), 3; got != want {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
- }
-
- for _, hit := range searchResult.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestSearchMatchAllWithRequestCacheDisabled(t *testing.T) {
- //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents, with request cache disabled
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(NewMatchAllQuery()).
- Size(100).
- Pretty(true).
- RequestCache(false).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if got, want := searchResult.Hits.TotalHits, int64(3); got != want {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
- }
- if got, want := len(searchResult.Hits.Hits), 3; got != want {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
- }
-}
-
-func BenchmarkSearchMatchAll(b *testing.B) {
- client := setupTestClientAndCreateIndexAndAddDocs(b)
-
- for n := 0; n < b.N; n++ {
- // Match all should return all documents
- all := NewMatchAllQuery()
- searchResult, err := client.Search().Index(testIndexName).Query(all).Do(context.TODO())
- if err != nil {
- b.Fatal(err)
- }
- if searchResult.Hits == nil {
- b.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits == 0 {
- b.Errorf("expected SearchResult.Hits.TotalHits > %d; got %d", 0, searchResult.Hits.TotalHits)
- }
- }
-}
-
-func TestSearchResultTotalHits(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- count, err := client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- all := NewMatchAllQuery()
- searchResult, err := client.Search().Index(testIndexName).Query(all).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- got := searchResult.TotalHits()
- if got != count {
- t.Fatalf("expected %d hits; got: %d", count, got)
- }
-
- // No hits
- searchResult = &SearchResult{}
- got = searchResult.TotalHits()
- if got != 0 {
- t.Errorf("expected %d hits; got: %d", 0, got)
- }
-}
-
-func TestSearchResultWithProfiling(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- all := NewMatchAllQuery()
- searchResult, err := client.Search().Index(testIndexName).Query(all).Profile(true).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- if searchResult.Profile == nil {
- t.Fatal("Profiled MatchAll query did not return profiling data with results")
- }
-}
-
-func TestSearchResultEach(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- all := NewMatchAllQuery()
- searchResult, err := client.Search().Index(testIndexName).Query(all).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Iterate over non-ptr type
- var aTweet tweet
- count := 0
- for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
- count++
- _, ok := item.(tweet)
- if !ok {
- t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
- }
- }
- if count == 0 {
- t.Errorf("expected to find some hits; got: %d", count)
- }
-
- // Iterate over ptr-type
- count = 0
- var aTweetPtr *tweet
- for _, item := range searchResult.Each(reflect.TypeOf(aTweetPtr)) {
- count++
- tw, ok := item.(*tweet)
- if !ok {
- t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
- }
- if tw == nil {
- t.Fatal("expected hit to not be nil")
- }
- }
- if count == 0 {
- t.Errorf("expected to find some hits; got: %d", count)
- }
-
- // Does not iterate when no hits are found
- searchResult = &SearchResult{Hits: nil}
- count = 0
- for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
- count++
- _ = item
- }
- if count != 0 {
- t.Errorf("expected to not find any hits; got: %d", count)
- }
- searchResult = &SearchResult{Hits: &SearchHits{Hits: make([]*SearchHit, 0)}}
- count = 0
- for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
- count++
- _ = item
- }
- if count != 0 {
- t.Errorf("expected to not find any hits; got: %d", count)
- }
-}
-
-func TestSearchResultEachNoSource(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocsNoSource(t)
-
- all := NewMatchAllQuery()
- searchResult, err := client.Search().Index(testNoSourceIndexName).Query(all).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Iterate over non-ptr type
- var aTweet tweet
- count := 0
- for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
- count++
- tw, ok := item.(tweet)
- if !ok {
- t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
- }
-
- if tw.User != "" {
- t.Fatalf("expected no _source hit to be empty tweet; got: %v", reflect.ValueOf(item))
- }
- }
- if count != 2 {
- t.Errorf("expected to find 2 hits; got: %d", count)
- }
-
- // Iterate over ptr-type
- count = 0
- var aTweetPtr *tweet
- for _, item := range searchResult.Each(reflect.TypeOf(aTweetPtr)) {
- count++
- tw, ok := item.(*tweet)
- if !ok {
- t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
- }
- if tw != nil {
- t.Fatal("expected hit to be nil")
- }
- }
- if count != 2 {
- t.Errorf("expected to find 2 hits; got: %d", count)
- }
-}
-
-func TestSearchSorting(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{
- User: "olivere", Retweets: 108,
- Message: "Welcome to Golang and Elasticsearch.",
- Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
- }
- tweet2 := tweet{
- User: "olivere", Retweets: 0,
- Message: "Another unrelated topic.",
- Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
- }
- tweet3 := tweet{
- User: "sandrae", Retweets: 12,
- Message: "Cycling is fun.",
- Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Match all should return all documents
- all := NewMatchAllQuery()
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(all).
- Sort("created", false).
- Timeout("1s").
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 3 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
- }
- if len(searchResult.Hits.Hits) != 3 {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
- }
-
- for _, hit := range searchResult.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestSearchSortingBySorters(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{
- User: "olivere", Retweets: 108,
- Message: "Welcome to Golang and Elasticsearch.",
- Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
- }
- tweet2 := tweet{
- User: "olivere", Retweets: 0,
- Message: "Another unrelated topic.",
- Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
- }
- tweet3 := tweet{
- User: "sandrae", Retweets: 12,
- Message: "Cycling is fun.",
- Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Match all should return all documents
- all := NewMatchAllQuery()
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(all).
- SortBy(NewFieldSort("created").Desc(), NewScoreSort()).
- Timeout("1s").
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 3 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
- }
- if len(searchResult.Hits.Hits) != 3 {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
- }
-
- for _, hit := range searchResult.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestSearchSpecificFields(t *testing.T) {
- // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Match all should return all documents
- all := NewMatchAllQuery()
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(all).
- StoredFields("message").
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 3 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
- }
- if len(searchResult.Hits.Hits) != 3 {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
- }
-
- for _, hit := range searchResult.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- if hit.Source != nil {
- t.Fatalf("expected SearchResult.Hits.Hit.Source to be nil; got: %q", hit.Source)
- }
- if hit.Fields == nil {
- t.Fatal("expected SearchResult.Hits.Hit.Fields to be != nil")
- }
- field, found := hit.Fields["message"]
- if !found {
- t.Errorf("expected SearchResult.Hits.Hit.Fields[%s] to be found", "message")
- }
- fields, ok := field.([]interface{})
- if !ok {
- t.Errorf("expected []interface{}; got: %v", reflect.TypeOf(fields))
- }
- if len(fields) != 1 {
- t.Errorf("expected a field with 1 entry; got: %d", len(fields))
- }
- message, ok := fields[0].(string)
- if !ok {
- t.Errorf("expected a string; got: %v", reflect.TypeOf(fields[0]))
- }
- if message == "" {
- t.Errorf("expected a message; got: %q", message)
- }
- }
-}
-
-func TestSearchExplain(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
- // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
-
- tweet1 := tweet{
- User: "olivere", Retweets: 108,
- Message: "Welcome to Golang and Elasticsearch.",
- Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
- }
- tweet2 := tweet{
- User: "olivere", Retweets: 0,
- Message: "Another unrelated topic.",
- Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
- }
- tweet3 := tweet{
- User: "sandrae", Retweets: 12,
- Message: "Cycling is fun.",
- Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Match all should return all documents
- all := NewMatchAllQuery()
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(all).
- Explain(true).
- Timeout("1s").
- // Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 3 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
- }
- if len(searchResult.Hits.Hits) != 3 {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
- }
-
- for _, hit := range searchResult.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- if hit.Explanation == nil {
- t.Fatal("expected search explanation")
- }
- if hit.Explanation.Value <= 0.0 {
- t.Errorf("expected explanation value to be > 0.0; got: %v", hit.Explanation.Value)
- }
- if hit.Explanation.Description == "" {
- t.Errorf("expected explanation description != %q; got: %q", "", hit.Explanation.Description)
- }
- }
-}
-
-func TestSearchSource(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{
- User: "olivere", Retweets: 108,
- Message: "Welcome to Golang and Elasticsearch.",
- Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
- }
- tweet2 := tweet{
- User: "olivere", Retweets: 0,
- Message: "Another unrelated topic.",
- Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
- }
- tweet3 := tweet{
- User: "sandrae", Retweets: 12,
- Message: "Cycling is fun.",
- Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Set up the request JSON manually to pass to the search service via Source()
- source := map[string]interface{}{
- "query": map[string]interface{}{
- "match_all": map[string]interface{}{},
- },
- }
-
- searchResult, err := client.Search().
- Index(testIndexName).
- Source(source). // sets the JSON request
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 3 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
- }
-}
-
-func TestSearchSourceWithString(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{
- User: "olivere", Retweets: 108,
- Message: "Welcome to Golang and Elasticsearch.",
- Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
- }
- tweet2 := tweet{
- User: "olivere", Retweets: 0,
- Message: "Another unrelated topic.",
- Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
- }
- tweet3 := tweet{
- User: "sandrae", Retweets: 12,
- Message: "Cycling is fun.",
- Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- searchResult, err := client.Search().
- Index(testIndexName).
- Source(`{"query":{"match_all":{}}}`). // sets the JSON request
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 3 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
- }
-}
-
-func TestSearchRawString(t *testing.T) {
- // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{
- User: "olivere", Retweets: 108,
- Message: "Welcome to Golang and Elasticsearch.",
- Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
- }
- tweet2 := tweet{
- User: "olivere", Retweets: 0,
- Message: "Another unrelated topic.",
- Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
- }
- tweet3 := tweet{
- User: "sandrae", Retweets: 12,
- Message: "Cycling is fun.",
- Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- query := RawStringQuery(`{"match_all":{}}`)
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(query).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 3 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
- }
-}
-
-func TestSearchSearchSource(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{
- User: "olivere", Retweets: 108,
- Message: "Welcome to Golang and Elasticsearch.",
- Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
- }
- tweet2 := tweet{
- User: "olivere", Retweets: 0,
- Message: "Another unrelated topic.",
- Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
- }
- tweet3 := tweet{
- User: "sandrae", Retweets: 12,
- Message: "Cycling is fun.",
- Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Set up the search source manually and pass it to the search service via SearchSource()
- ss := NewSearchSource().Query(NewMatchAllQuery()).From(0).Size(2)
-
- // One can use ss.Source() to get to the raw interface{} that will be used
- // as the search request JSON by the SearchService.
-
- searchResult, err := client.Search().
- Index(testIndexName).
- SearchSource(ss). // sets the SearchSource
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 3 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
- }
- if len(searchResult.Hits.Hits) != 2 {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits))
- }
-}
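As a side note on the comment above about ss.Source(): a minimal sketch of inspecting the request body a SearchSource would produce, using only Source() and encoding/json, in the same way the SearchSource unit tests earlier in this diff do. The printed JSON is what this builder typically emits, not a guaranteed wire format.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	ss := elastic.NewSearchSource().Query(elastic.NewMatchAllQuery()).From(0).Size(2)
	src, err := ss.Source() // the raw interface{} used as the search request JSON
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	// typically {"from":0,"query":{"match_all":{}},"size":2}
	fmt.Println(string(data))
}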
-
-func TestSearchInnerHitsOnHasChild(t *testing.T) {
- // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- ctx := context.Background()
-
- // Create join index
- createIndex, err := client.CreateIndex(testJoinIndex).Body(testJoinMapping).Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if createIndex == nil {
- t.Errorf("expected result to be != nil; got: %v", createIndex)
- }
-
- // Add documents
- // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/parent-join.html for example code.
- doc1 := joinDoc{
- Message: "This is a question",
- JoinField: &joinField{Name: "question"},
- }
- _, err = client.Index().Index(testJoinIndex).Type("doc").Id("1").BodyJson(&doc1).Refresh("true").Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
- doc2 := joinDoc{
- Message: "This is another question",
- JoinField: "question",
- }
- _, err = client.Index().Index(testJoinIndex).Type("doc").Id("2").BodyJson(&doc2).Refresh("true").Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
- doc3 := joinDoc{
- Message: "This is an answer",
- JoinField: &joinField{
- Name: "answer",
- Parent: "1",
- },
- }
- _, err = client.Index().Index(testJoinIndex).Type("doc").Id("3").BodyJson(&doc3).Routing("1").Refresh("true").Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
- doc4 := joinDoc{
- Message: "This is another answer",
- JoinField: &joinField{
- Name: "answer",
- Parent: "1",
- },
- }
- _, err = client.Index().Index(testJoinIndex).Type("doc").Id("4").BodyJson(&doc4).Routing("1").Refresh("true").Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testJoinIndex).Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // Search for all documents that have an answer, and return those answers as inner hits
- bq := NewBoolQuery()
- bq = bq.Must(NewMatchAllQuery())
- bq = bq.Filter(NewHasChildQuery("answer", NewMatchAllQuery()).
- InnerHit(NewInnerHit().Name("answers")))
-
- searchResult, err := client.Search().
- Index(testJoinIndex).
- Query(bq).
- Pretty(true).
- Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 1 {
-		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
- }
- if len(searchResult.Hits.Hits) != 1 {
-		t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
- }
-
- hit := searchResult.Hits.Hits[0]
- if want, have := "1", hit.Id; want != have {
- t.Fatalf("expected tweet %q; got: %q", want, have)
- }
- if hit.InnerHits == nil {
- t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
- }
- if want, have := 1, len(hit.InnerHits); want != have {
- t.Fatalf("expected %d inner hits; got: %d", want, have)
- }
- innerHits, found := hit.InnerHits["answers"]
- if !found {
- t.Fatalf("expected inner hits for name %q", "answers")
- }
- if innerHits == nil || innerHits.Hits == nil {
- t.Fatal("expected inner hits != nil")
- }
- if want, have := 2, len(innerHits.Hits.Hits); want != have {
- t.Fatalf("expected %d inner hits; got: %d", want, have)
- }
- if want, have := "3", innerHits.Hits.Hits[0].Id; want != have {
- t.Fatalf("expected inner hit with id %q; got: %q", want, have)
- }
- if want, have := "4", innerHits.Hits.Hits[1].Id; want != have {
- t.Fatalf("expected inner hit with id %q; got: %q", want, have)
- }
-}
-
-func TestSearchInnerHitsOnHasParent(t *testing.T) {
- // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- ctx := context.Background()
-
- // Create join index
- createIndex, err := client.CreateIndex(testJoinIndex).Body(testJoinMapping).Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if createIndex == nil {
- t.Errorf("expected result to be != nil; got: %v", createIndex)
- }
-
- // Add documents
- // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/parent-join.html for example code.
- doc1 := joinDoc{
- Message: "This is a question",
- JoinField: &joinField{Name: "question"},
- }
- _, err = client.Index().Index(testJoinIndex).Type("doc").Id("1").BodyJson(&doc1).Refresh("true").Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
- doc2 := joinDoc{
- Message: "This is another question",
- JoinField: "question",
- }
- _, err = client.Index().Index(testJoinIndex).Type("doc").Id("2").BodyJson(&doc2).Refresh("true").Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
- doc3 := joinDoc{
- Message: "This is an answer",
- JoinField: &joinField{
- Name: "answer",
- Parent: "1",
- },
- }
- _, err = client.Index().Index(testJoinIndex).Type("doc").Id("3").BodyJson(&doc3).Routing("1").Refresh("true").Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
- doc4 := joinDoc{
- Message: "This is another answer",
- JoinField: &joinField{
- Name: "answer",
- Parent: "1",
- },
- }
- _, err = client.Index().Index(testJoinIndex).Type("doc").Id("4").BodyJson(&doc4).Routing("1").Refresh("true").Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testJoinIndex).Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // Search for all documents that have an answer, and return those answers as inner hits
- bq := NewBoolQuery()
- bq = bq.Must(NewMatchAllQuery())
- bq = bq.Filter(NewHasParentQuery("question", NewMatchAllQuery()).
- InnerHit(NewInnerHit().Name("answers")))
-
- searchResult, err := client.Search().
- Index(testJoinIndex).
- Query(bq).
- Pretty(true).
- Do(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if want, have := int64(2), searchResult.Hits.TotalHits; want != have {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, have)
- }
- if want, have := 2, len(searchResult.Hits.Hits); want != have {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, have)
- }
-
- hit := searchResult.Hits.Hits[0]
- if want, have := "3", hit.Id; want != have {
- t.Fatalf("expected tweet %q; got: %q", want, have)
- }
- if hit.InnerHits == nil {
- t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
- }
- if want, have := 1, len(hit.InnerHits); want != have {
- t.Fatalf("expected %d inner hits; got: %d", want, have)
- }
- innerHits, found := hit.InnerHits["answers"]
- if !found {
-		t.Fatalf("expected inner hits for name %q", "answers")
- }
- if innerHits == nil || innerHits.Hits == nil {
- t.Fatal("expected inner hits != nil")
- }
- if want, have := 1, len(innerHits.Hits.Hits); want != have {
- t.Fatalf("expected %d inner hits; got: %d", want, have)
- }
- if want, have := "1", innerHits.Hits.Hits[0].Id; want != have {
- t.Fatalf("expected inner hit with id %q; got: %q", want, have)
- }
-
- hit = searchResult.Hits.Hits[1]
- if want, have := "4", hit.Id; want != have {
- t.Fatalf("expected tweet %q; got: %q", want, have)
- }
- if hit.InnerHits == nil {
- t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
- }
- if want, have := 1, len(hit.InnerHits); want != have {
- t.Fatalf("expected %d inner hits; got: %d", want, have)
- }
- innerHits, found = hit.InnerHits["answers"]
- if !found {
-		t.Fatalf("expected inner hits for name %q", "answers")
- }
- if innerHits == nil || innerHits.Hits == nil {
- t.Fatal("expected inner hits != nil")
- }
- if want, have := 1, len(innerHits.Hits.Hits); want != have {
- t.Fatalf("expected %d inner hits; got: %d", want, have)
- }
- if want, have := "1", innerHits.Hits.Hits[0].Id; want != have {
- t.Fatalf("expected inner hit with id %q; got: %q", want, have)
- }
-}
-
-func TestSearchBuildURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Indices []string
- Types []string
- Expected string
- }{
- {
- []string{},
- []string{},
- "/_search",
- },
- {
- []string{"index1"},
- []string{},
- "/index1/_search",
- },
- {
- []string{"index1", "index2"},
- []string{},
- "/index1%2Cindex2/_search",
- },
- {
- []string{},
- []string{"type1"},
- "/_all/type1/_search",
- },
- {
- []string{"index1"},
- []string{"type1"},
- "/index1/type1/_search",
- },
- {
- []string{"index1", "index2"},
- []string{"type1", "type2"},
- "/index1%2Cindex2/type1%2Ctype2/_search",
- },
- {
- []string{},
- []string{"type1", "type2"},
- "/_all/type1%2Ctype2/_search",
- },
- }
-
- for i, test := range tests {
- path, _, err := client.Search().Index(test.Indices...).Type(test.Types...).buildURL()
- if err != nil {
- t.Errorf("case #%d: %v", i+1, err)
- continue
- }
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
-}
-
-func TestSearchFilterPath(t *testing.T) {
- // client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
- client := setupTestClientAndCreateIndexAndAddDocs(t)
-
- // Match all should return all documents
- all := NewMatchAllQuery()
- searchResult, err := client.Search().
- Index(testIndexName).
- Type("doc").
- Query(all).
- FilterPath(
- "took",
- "hits.hits._id",
- "hits.hits._source.user",
- "hits.hits._source.message",
- ).
- Timeout("1s").
- Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Fatalf("expected SearchResult.Hits != nil; got nil")
- }
- // 0 because it was filtered out
- if want, got := int64(0), searchResult.Hits.TotalHits; want != got {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
- }
- if want, got := 3, len(searchResult.Hits.Hits); want != got {
- t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
- }
-
- for _, hit := range searchResult.Hits.Hits {
- if want, got := "", hit.Index; want != got {
- t.Fatalf("expected index %q, got %q", want, got)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- // user field
- v, found := item["user"]
- if !found {
- t.Fatalf("expected SearchResult.Hits.Hit[%q] to be found", "user")
- }
- if v == "" {
- t.Fatalf("expected user field, got %v (%T)", v, v)
- }
-		// No retweets field (excluded via FilterPath)
-		if v, found := item["retweets"]; found {
-			t.Fatalf("expected SearchResult.Hits.Hit[%q] to not be found, got %v", "retweets", v)
-		}
- }
-}
-
-func TestSearchAfter(t *testing.T) {
- // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{
- User: "olivere", Retweets: 108,
- Message: "Welcome to Golang and Elasticsearch.",
- Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
- }
- tweet2 := tweet{
- User: "olivere", Retweets: 0,
- Message: "Another unrelated topic.",
- Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
- }
- tweet3 := tweet{
- User: "sandrae", Retweets: 12,
- Message: "Cycling is fun.",
- Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(NewMatchAllQuery()).
- SearchAfter("olivere").
- Sort("user", true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 3 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
- }
- if want, got := 1, len(searchResult.Hits.Hits); want != got {
- t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got: %d", want, got)
- }
- hit := searchResult.Hits.Hits[0]
- if want, got := "3", hit.Id; want != got {
- t.Fatalf("expected tweet %q; got: %q", want, got)
- }
-}
-
-func TestSearchResultWithFieldCollapsing(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- searchResult, err := client.Search().
- Index(testIndexName).
- Type("doc").
- Query(NewMatchAllQuery()).
- Collapse(NewCollapseBuilder("user")).
- Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- if searchResult.Hits == nil {
- t.Fatalf("expected SearchResult.Hits != nil; got nil")
- }
- if got := searchResult.Hits.TotalHits; got == 0 {
- t.Fatalf("expected SearchResult.Hits.TotalHits > 0; got %d", got)
- }
-
- for _, hit := range searchResult.Hits.Hits {
- if hit.Index != testIndexName {
- t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- if len(hit.Fields) == 0 {
- t.Fatal("expected fields in SearchResult")
- }
- usersVal, ok := hit.Fields["user"]
- if !ok {
- t.Fatalf("expected %q field in fields of SearchResult", "user")
- }
- users, ok := usersVal.([]interface{})
- if !ok {
- t.Fatalf("expected slice of strings in field of SearchResult, got %T", usersVal)
- }
- if len(users) != 1 {
- t.Fatalf("expected 1 entry in users slice, got %d", len(users))
- }
- }
-}
-
-func TestSearchResultWithFieldCollapsingAndInnerHits(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- searchResult, err := client.Search().
- Index(testIndexName).
- Type("doc").
- Query(NewMatchAllQuery()).
- Collapse(
- NewCollapseBuilder("user").
- InnerHit(
- NewInnerHit().Name("last_tweets").Size(5).Sort("created", true),
- ).
- MaxConcurrentGroupRequests(4)).
- Pretty(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- if searchResult.Hits == nil {
- t.Fatalf("expected SearchResult.Hits != nil; got nil")
- }
- if got := searchResult.Hits.TotalHits; got == 0 {
- t.Fatalf("expected SearchResult.Hits.TotalHits > 0; got %d", got)
- }
-
- for _, hit := range searchResult.Hits.Hits {
- if hit.Index != testIndexName {
- t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- if len(hit.Fields) == 0 {
- t.Fatal("expected fields in SearchResult")
- }
- usersVal, ok := hit.Fields["user"]
- if !ok {
- t.Fatalf("expected %q field in fields of SearchResult", "user")
- }
- users, ok := usersVal.([]interface{})
- if !ok {
- t.Fatalf("expected slice of strings in field of SearchResult, got %T", usersVal)
- }
- if len(users) != 1 {
- t.Fatalf("expected 1 entry in users slice, got %d", len(users))
- }
- lastTweets, ok := hit.InnerHits["last_tweets"]
- if !ok {
- t.Fatalf("expected inner_hits named %q in SearchResult", "last_tweets")
- }
- if lastTweets == nil {
- t.Fatal("expected inner_hits in SearchResult")
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/setup_test.go b/vendor/github.com/olivere/elastic/setup_test.go
deleted file mode 100644
index 480ae5d20..000000000
--- a/vendor/github.com/olivere/elastic/setup_test.go
+++ /dev/null
@@ -1,445 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "log"
- "math/rand"
- "os"
- "time"
-)
-
-const (
- testIndexName = "elastic-test"
- testIndexName2 = "elastic-test2"
- testIndexName3 = "elastic-test3"
- testMapping = `
-{
- "settings":{
- "number_of_shards":1,
- "number_of_replicas":0
- },
- "mappings":{
- "doc":{
- "properties":{
- "user":{
- "type":"keyword"
- },
- "message":{
- "type":"text",
- "store": true,
- "fielddata": true
- },
- "tags":{
- "type":"keyword"
- },
- "location":{
- "type":"geo_point"
- },
- "suggest_field":{
- "type":"completion",
- "contexts":[
- {
- "name":"user_name",
- "type":"category"
- }
- ]
- }
- }
- }
- }
-}
-`
-
- testNoSourceIndexName = "elastic-nosource-test"
- testNoSourceMapping = `
-{
- "settings":{
- "number_of_shards":1,
- "number_of_replicas":0
- },
- "mappings":{
- "doc":{
- "_source": {
- "enabled": false
- },
- "properties":{
- "user":{
- "type":"keyword"
- },
- "message":{
- "type":"text",
- "store": true,
- "fielddata": true
- },
- "tags":{
- "type":"keyword"
- },
- "location":{
- "type":"geo_point"
- },
- "suggest_field":{
- "type":"completion",
- "contexts":[
- {
- "name":"user_name",
- "type":"category"
- }
- ]
- }
- }
- }
- }
-}
-`
-
- testJoinIndex = "elastic-joins"
- testJoinMapping = `
- {
- "settings":{
- "number_of_shards":1,
- "number_of_replicas":0
- },
- "mappings":{
- "doc":{
- "properties":{
- "message":{
- "type":"text"
- },
- "my_join_field": {
- "type": "join",
- "relations": {
- "question": "answer"
- }
- }
- }
- }
- }
- }
-`
-
- testOrderIndex = "elastic-orders"
- testOrderMapping = `
-{
- "settings":{
- "number_of_shards":1,
- "number_of_replicas":0
- },
- "mappings":{
- "doc":{
- "properties":{
- "article":{
- "type":"text"
- },
- "manufacturer":{
- "type":"keyword"
- },
- "price":{
- "type":"float"
- },
- "time":{
- "type":"date",
- "format": "YYYY-MM-dd"
- }
- }
- }
- }
-}
-`
-
- /*
- testDoctypeIndex = "elastic-doctypes"
- testDoctypeMapping = `
- {
- "settings":{
- "number_of_shards":1,
- "number_of_replicas":0
- },
- "mappings":{
- "doc":{
- "properties":{
- "message":{
- "type":"text",
- "store": true,
- "fielddata": true
- }
- }
- }
- }
- }
- `
- */
-
- testQueryIndex = "elastic-queries"
- testQueryMapping = `
-{
- "settings":{
- "number_of_shards":1,
- "number_of_replicas":0
- },
- "mappings":{
- "doc":{
- "properties":{
- "message":{
- "type":"text",
- "store": true,
- "fielddata": true
- },
- "query": {
- "type": "percolator"
- }
- }
- }
- }
-}
-`
-)
-
-type tweet struct {
- User string `json:"user"`
- Message string `json:"message"`
- Retweets int `json:"retweets"`
- Image string `json:"image,omitempty"`
- Created time.Time `json:"created,omitempty"`
- Tags []string `json:"tags,omitempty"`
- Location string `json:"location,omitempty"`
- Suggest *SuggestField `json:"suggest_field,omitempty"`
-}
-
-func (t tweet) String() string {
- return fmt.Sprintf("tweet{User:%q,Message:%q,Retweets:%d}", t.User, t.Message, t.Retweets)
-}
-
-type comment struct {
- User string `json:"user"`
- Comment string `json:"comment"`
- Created time.Time `json:"created,omitempty"`
-}
-
-func (c comment) String() string {
- return fmt.Sprintf("comment{User:%q,Comment:%q}", c.User, c.Comment)
-}
-
-type joinDoc struct {
- Message string `json:"message"`
- JoinField interface{} `json:"my_join_field,omitempty"`
-}
-
-type joinField struct {
- Name string `json:"name"`
- Parent string `json:"parent,omitempty"`
-}
-
-type order struct {
- Article string `json:"article"`
- Manufacturer string `json:"manufacturer"`
- Price float64 `json:"price"`
- Time string `json:"time,omitempty"`
-}
-
-func (o order) String() string {
- return fmt.Sprintf("order{Article:%q,Manufacturer:%q,Price:%v,Time:%v}", o.Article, o.Manufacturer, o.Price, o.Time)
-}
-
-// doctype is required for Percolate tests.
-type doctype struct {
- Message string `json:"message"`
-}
-
-// queries is required for Percolate tests.
-type queries struct {
- Query string `json:"query"`
-}
-
-func isTravis() bool {
- return os.Getenv("TRAVIS") != ""
-}
-
-func travisGoVersion() string {
- return os.Getenv("TRAVIS_GO_VERSION")
-}
-
-type logger interface {
- Error(args ...interface{})
- Errorf(format string, args ...interface{})
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
- Fail()
- FailNow()
- Log(args ...interface{})
- Logf(format string, args ...interface{})
-}
-
-func setupTestClient(t logger, options ...ClientOptionFunc) (client *Client) {
- var err error
-
- client, err = NewClient(options...)
- if err != nil {
- t.Fatal(err)
- }
-
- client.DeleteIndex(testIndexName).Do(context.TODO())
- client.DeleteIndex(testIndexName2).Do(context.TODO())
- client.DeleteIndex(testIndexName3).Do(context.TODO())
- client.DeleteIndex(testOrderIndex).Do(context.TODO())
- client.DeleteIndex(testNoSourceIndexName).Do(context.TODO())
- //client.DeleteIndex(testDoctypeIndex).Do(context.TODO())
- client.DeleteIndex(testQueryIndex).Do(context.TODO())
- client.DeleteIndex(testJoinIndex).Do(context.TODO())
-
- return client
-}
-
-func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Client {
- client := setupTestClient(t, options...)
-
- // Create index
- createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if createIndex == nil {
- t.Errorf("expected result to be != nil; got: %v", createIndex)
- }
-
- // Create second index
- createIndex2, err := client.CreateIndex(testIndexName2).Body(testMapping).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if createIndex2 == nil {
- t.Errorf("expected result to be != nil; got: %v", createIndex2)
- }
-
- // Create no source index
- createNoSourceIndex, err := client.CreateIndex(testNoSourceIndexName).Body(testNoSourceMapping).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if createNoSourceIndex == nil {
- t.Errorf("expected result to be != nil; got: %v", createNoSourceIndex)
- }
-
- // Create order index
- createOrderIndex, err := client.CreateIndex(testOrderIndex).Body(testOrderMapping).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if createOrderIndex == nil {
- t.Errorf("expected result to be != nil; got: %v", createOrderIndex)
- }
-
- return client
-}
-
-func setupTestClientAndCreateIndexAndLog(t logger, options ...ClientOptionFunc) *Client {
- return setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
-}
-
-func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFunc) *Client {
- client := setupTestClientAndCreateIndex(t, options...)
-
- // Add tweets
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
- //comment1 := comment{User: "nico", Comment: "You bet."}
-
- _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Index().Index(testIndexName).Type("doc").Id("3").Routing("someroutingkey").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- /*
- _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("3").BodyJson(&comment1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- */
-
- // Add orders
- var orders []order
- orders = append(orders, order{Article: "Apple MacBook", Manufacturer: "Apple", Price: 1290, Time: "2015-01-18"})
- orders = append(orders, order{Article: "Paper", Manufacturer: "Canon", Price: 100, Time: "2015-03-01"})
- orders = append(orders, order{Article: "Apple iPad", Manufacturer: "Apple", Price: 499, Time: "2015-04-12"})
- orders = append(orders, order{Article: "Dell XPS 13", Manufacturer: "Dell", Price: 1600, Time: "2015-04-18"})
- orders = append(orders, order{Article: "Apple Watch", Manufacturer: "Apple", Price: 349, Time: "2015-04-29"})
- orders = append(orders, order{Article: "Samsung TV", Manufacturer: "Samsung", Price: 790, Time: "2015-05-03"})
- orders = append(orders, order{Article: "Hoodie", Manufacturer: "h&m", Price: 49, Time: "2015-06-03"})
- orders = append(orders, order{Article: "T-Shirt", Manufacturer: "h&m", Price: 19, Time: "2015-06-18"})
- for i, o := range orders {
- id := fmt.Sprintf("%d", i)
- _, err = client.Index().Index(testOrderIndex).Type("doc").Id(id).BodyJson(&o).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- }
-
- // Flush
- _, err = client.Flush().Index(testIndexName, testOrderIndex).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- return client
-}
-
-func setupTestClientAndCreateIndexAndAddDocsNoSource(t logger, options ...ClientOptionFunc) *Client {
- client := setupTestClientAndCreateIndex(t, options...)
-
- // Add tweets
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
-
- _, err := client.Index().Index(testNoSourceIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Index().Index(testNoSourceIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- // Flush
- _, err = client.Flush().Index(testNoSourceIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- return client
-}
-
-var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
-
-func randomString(n int) string {
- b := make([]rune, n)
- for i := range b {
- b[i] = letters[rand.Intn(len(letters))]
- }
- return string(b)
-}
-
-type lexicographically struct {
- strings []string
-}
-
-func (l lexicographically) Len() int {
- return len(l.strings)
-}
-
-func (l lexicographically) Less(i, j int) bool {
- return l.strings[i] < l.strings[j]
-}
-
-func (l lexicographically) Swap(i, j int) {
- l.strings[i], l.strings[j] = l.strings[j], l.strings[i]
-}
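For orientation, a minimal sketch (not part of the deleted file) of a test written on top of these helpers; it assumes the running Elasticsearch test cluster this suite expects and the standard "testing" import. Since "user" is mapped as a keyword above, an exact term match is appropriate:

func TestTermQueryOnUserKeyword(t *testing.T) {
    client := setupTestClientAndCreateIndexAndAddDocs(t)

    // Two of the three seeded tweets belong to user "olivere".
    res, err := client.Search().
        Index(testIndexName).
        Type("doc").
        Query(NewTermQuery("user", "olivere")).
        Do(context.TODO())
    if err != nil {
        t.Fatal(err)
    }
    if res.Hits.TotalHits != 2 {
        t.Fatalf("expected 2 hits; got %d", res.Hits.TotalHits)
    }
}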
diff --git a/vendor/github.com/olivere/elastic/snapshot_create.go b/vendor/github.com/olivere/elastic/snapshot_create.go
deleted file mode 100644
index 1bbd2762e..000000000
--- a/vendor/github.com/olivere/elastic/snapshot_create.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
- "time"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// SnapshotCreateService is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html.
-type SnapshotCreateService struct {
- client *Client
- pretty bool
- repository string
- snapshot string
- masterTimeout string
- waitForCompletion *bool
- bodyJson interface{}
- bodyString string
-}
-
-// NewSnapshotCreateService creates a new SnapshotCreateService.
-func NewSnapshotCreateService(client *Client) *SnapshotCreateService {
- return &SnapshotCreateService{
- client: client,
- }
-}
-
-// Repository is the repository name.
-func (s *SnapshotCreateService) Repository(repository string) *SnapshotCreateService {
- s.repository = repository
- return s
-}
-
-// Snapshot is the snapshot name.
-func (s *SnapshotCreateService) Snapshot(snapshot string) *SnapshotCreateService {
- s.snapshot = snapshot
- return s
-}
-
-// MasterTimeout is documented as: Explicit operation timeout for connection to master node.
-func (s *SnapshotCreateService) MasterTimeout(masterTimeout string) *SnapshotCreateService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// WaitForCompletion is documented as: Should this request wait until the operation has completed before returning.
-func (s *SnapshotCreateService) WaitForCompletion(waitForCompletion bool) *SnapshotCreateService {
- s.waitForCompletion = &waitForCompletion
- return s
-}
-
-// Pretty indicates whether the JSON response should be indented and human readable.
-func (s *SnapshotCreateService) Pretty(pretty bool) *SnapshotCreateService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson is documented as: The snapshot definition.
-func (s *SnapshotCreateService) BodyJson(body interface{}) *SnapshotCreateService {
- s.bodyJson = body
- return s
-}
-
-// BodyString is documented as: The snapshot definition.
-func (s *SnapshotCreateService) BodyString(body string) *SnapshotCreateService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *SnapshotCreateService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_snapshot/{repository}/{snapshot}", map[string]string{
- "snapshot": s.snapshot,
- "repository": s.repository,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.waitForCompletion != nil {
- params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *SnapshotCreateService) Validate() error {
- var invalid []string
- if s.repository == "" {
- invalid = append(invalid, "Repository")
- }
- if s.snapshot == "" {
- invalid = append(invalid, "Snapshot")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *SnapshotCreateService) Do(ctx context.Context) (*SnapshotCreateResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "PUT",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(SnapshotCreateResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// SnapshotShardFailure stores information about failures that occurred during the shard snapshotting process.
-type SnapshotShardFailure struct {
- Index string `json:"index"`
- IndexUUID string `json:"index_uuid"`
- ShardID int `json:"shard_id"`
- Reason string `json:"reason"`
- NodeID string `json:"node_id"`
- Status string `json:"status"`
-}
-
-// SnapshotCreateResponse is the response of SnapshotCreateService.Do.
-type SnapshotCreateResponse struct {
- // Accepted indicates whether the request was accepted by elasticsearch.
- // It's available when waitForCompletion is false.
- Accepted *bool `json:"accepted"`
-
- // Snapshot is available when waitForCompletion is true.
- Snapshot *struct {
- Snapshot string `json:"snapshot"`
- UUID string `json:"uuid"`
- VersionID int `json:"version_id"`
- Version string `json:"version"`
- Indices []string `json:"indices"`
- State string `json:"state"`
- Reason string `json:"reason"`
- StartTime time.Time `json:"start_time"`
- StartTimeInMillis int64 `json:"start_time_in_millis"`
- EndTime time.Time `json:"end_time"`
- EndTimeInMillis int64 `json:"end_time_in_millis"`
- DurationInMillis int64 `json:"duration_in_millis"`
- Failures []SnapshotShardFailure `json:"failures"`
- Shards shardsInfo `json:"shards"`
- } `json:"snapshot"`
-}
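As a usage sketch (not part of the deleted file), assuming an initialized *elastic.Client named client, a context ctx, and an already-registered repository called "my_backup" (the snapshot and repository names are placeholders):

resp, err := client.SnapshotCreate("my_backup", "snapshot_1").
    WaitForCompletion(true).
    BodyString(`{"indices":"elastic-orders","ignore_unavailable":true}`). // body fields follow the Elasticsearch snapshot API
    Do(ctx)
if err != nil {
    // handle error
}
if resp.Snapshot != nil { // populated because WaitForCompletion(true) was set
    fmt.Printf("snapshot %s finished in state %s\n", resp.Snapshot.Snapshot, resp.Snapshot.State)
}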
diff --git a/vendor/github.com/olivere/elastic/snapshot_create_repository.go b/vendor/github.com/olivere/elastic/snapshot_create_repository.go
deleted file mode 100644
index e7f6d5336..000000000
--- a/vendor/github.com/olivere/elastic/snapshot_create_repository.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// SnapshotCreateRepositoryService creates a snapshot repository.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html
-// for details.
-type SnapshotCreateRepositoryService struct {
- client *Client
- pretty bool
- repository string
- masterTimeout string
- timeout string
- verify *bool
- typ string
- settings map[string]interface{}
- bodyJson interface{}
- bodyString string
-}
-
-// NewSnapshotCreateRepositoryService creates a new SnapshotCreateRepositoryService.
-func NewSnapshotCreateRepositoryService(client *Client) *SnapshotCreateRepositoryService {
- return &SnapshotCreateRepositoryService{
- client: client,
- }
-}
-
-// Repository is the repository name.
-func (s *SnapshotCreateRepositoryService) Repository(repository string) *SnapshotCreateRepositoryService {
- s.repository = repository
- return s
-}
-
-// MasterTimeout specifies an explicit operation timeout for connection to master node.
-func (s *SnapshotCreateRepositoryService) MasterTimeout(masterTimeout string) *SnapshotCreateRepositoryService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *SnapshotCreateRepositoryService) Timeout(timeout string) *SnapshotCreateRepositoryService {
- s.timeout = timeout
- return s
-}
-
-// Verify indicates whether to verify the repository after creation.
-func (s *SnapshotCreateRepositoryService) Verify(verify bool) *SnapshotCreateRepositoryService {
- s.verify = &verify
- return s
-}
-
-// Pretty indicates whether the JSON response should be indented and human readable.
-func (s *SnapshotCreateRepositoryService) Pretty(pretty bool) *SnapshotCreateRepositoryService {
- s.pretty = pretty
- return s
-}
-
-// Type sets the snapshot repository type, e.g. "fs".
-func (s *SnapshotCreateRepositoryService) Type(typ string) *SnapshotCreateRepositoryService {
- s.typ = typ
- return s
-}
-
-// Settings sets all settings of the snapshot repository.
-func (s *SnapshotCreateRepositoryService) Settings(settings map[string]interface{}) *SnapshotCreateRepositoryService {
- s.settings = settings
- return s
-}
-
-// Setting sets a single setting of the snapshot repository.
-func (s *SnapshotCreateRepositoryService) Setting(name string, value interface{}) *SnapshotCreateRepositoryService {
- if s.settings == nil {
- s.settings = make(map[string]interface{})
- }
- s.settings[name] = value
- return s
-}
-
-// BodyJson is documented as: The repository definition.
-func (s *SnapshotCreateRepositoryService) BodyJson(body interface{}) *SnapshotCreateRepositoryService {
- s.bodyJson = body
- return s
-}
-
-// BodyString is documented as: The repository definition.
-func (s *SnapshotCreateRepositoryService) BodyString(body string) *SnapshotCreateRepositoryService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *SnapshotCreateRepositoryService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_snapshot/{repository}", map[string]string{
- "repository": s.repository,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.verify != nil {
- params.Set("verify", fmt.Sprintf("%v", *s.verify))
- }
- return path, params, nil
-}
-
-// buildBody builds the body for the operation.
-func (s *SnapshotCreateRepositoryService) buildBody() (interface{}, error) {
- if s.bodyJson != nil {
- return s.bodyJson, nil
- }
- if s.bodyString != "" {
- return s.bodyString, nil
- }
-
- body := map[string]interface{}{
- "type": s.typ,
- }
- if len(s.settings) > 0 {
- body["settings"] = s.settings
- }
- return body, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *SnapshotCreateRepositoryService) Validate() error {
- var invalid []string
- if s.repository == "" {
- invalid = append(invalid, "Repository")
- }
- if s.bodyString == "" && s.bodyJson == nil {
- invalid = append(invalid, "BodyJson")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *SnapshotCreateRepositoryService) Do(ctx context.Context) (*SnapshotCreateRepositoryResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- body, err := s.buildBody()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "PUT",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(SnapshotCreateRepositoryResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// SnapshotCreateRepositoryResponse is the response of SnapshotCreateRepositoryService.Do.
-type SnapshotCreateRepositoryResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
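A usage sketch (not part of the deleted file), again assuming client and ctx; the repository name and location are placeholders. Note that Validate above requires BodyJson or BodyString to be set, so the repository definition is passed as an explicit body here:

resp, err := client.SnapshotCreateRepository("my_backup").
    BodyJson(map[string]interface{}{
        "type": "fs",
        "settings": map[string]interface{}{
            "location": "my_backup_location",
            "compress": true,
        },
    }).
    Do(ctx)
if err != nil {
    // handle error
}
if !resp.Acknowledged {
    // the master did not acknowledge the repository creation
}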
diff --git a/vendor/github.com/olivere/elastic/snapshot_create_repository_test.go b/vendor/github.com/olivere/elastic/snapshot_create_repository_test.go
deleted file mode 100644
index 2045c700d..000000000
--- a/vendor/github.com/olivere/elastic/snapshot_create_repository_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSnapshotPutRepositoryURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Repository string
- Expected string
- }{
- {
- "repo",
- "/_snapshot/repo",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.SnapshotCreateRepository(test.Repository).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
-
-func TestSnapshotPutRepositoryBody(t *testing.T) {
- client := setupTestClient(t)
-
- service := client.SnapshotCreateRepository("my_backup")
- service = service.Type("fs").
- Settings(map[string]interface{}{
- "location": "my_backup_location",
- "compress": false,
- }).
- Setting("compress", true).
- Setting("chunk_size", 16*1024*1024)
-
- src, err := service.buildBody()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"settings":{"chunk_size":16777216,"compress":true,"location":"my_backup_location"},"type":"fs"}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/snapshot_create_test.go b/vendor/github.com/olivere/elastic/snapshot_create_test.go
deleted file mode 100644
index 74b009cfe..000000000
--- a/vendor/github.com/olivere/elastic/snapshot_create_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package elastic
-
-import (
- "net/url"
- "reflect"
- "testing"
-)
-
-func TestSnapshotValidate(t *testing.T) {
- var client *Client
-
- err := NewSnapshotCreateService(client).Validate()
- got := err.Error()
- expected := "missing required fields: [Repository Snapshot]"
- if got != expected {
- t.Errorf("expected %q; got: %q", expected, got)
- }
-}
-
-func TestSnapshotPutURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Repository string
- Snapshot string
- Pretty bool
- MasterTimeout string
- WaitForCompletion bool
- ExpectedPath string
- ExpectedParams url.Values
- }{
- {
- Repository: "repo",
- Snapshot: "snapshot_of_sunday",
- Pretty: true,
- MasterTimeout: "60s",
- WaitForCompletion: true,
- ExpectedPath: "/_snapshot/repo/snapshot_of_sunday",
- ExpectedParams: url.Values{
- "pretty": []string{"true"},
- "master_timeout": []string{"60s"},
- "wait_for_completion": []string{"true"},
- },
- },
- }
-
- for _, test := range tests {
- path, params, err := client.SnapshotCreate(test.Repository, test.Snapshot).
- Pretty(test.Pretty).
- MasterTimeout(test.MasterTimeout).
- WaitForCompletion(test.WaitForCompletion).
- buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.ExpectedPath {
- t.Errorf("expected %q; got: %q", test.ExpectedPath, path)
- }
- if !reflect.DeepEqual(params, test.ExpectedParams) {
- t.Errorf("expected %q; got: %q", test.ExpectedParams, params)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/snapshot_delete_repository.go b/vendor/github.com/olivere/elastic/snapshot_delete_repository.go
deleted file mode 100644
index ad3e49b0e..000000000
--- a/vendor/github.com/olivere/elastic/snapshot_delete_repository.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// SnapshotDeleteRepositoryService deletes a snapshot repository.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html
-// for details.
-type SnapshotDeleteRepositoryService struct {
- client *Client
- pretty bool
- repository []string
- masterTimeout string
- timeout string
-}
-
-// NewSnapshotDeleteRepositoryService creates a new SnapshotDeleteRepositoryService.
-func NewSnapshotDeleteRepositoryService(client *Client) *SnapshotDeleteRepositoryService {
- return &SnapshotDeleteRepositoryService{
- client: client,
- repository: make([]string, 0),
- }
-}
-
-// Repository is the list of repository names.
-func (s *SnapshotDeleteRepositoryService) Repository(repositories ...string) *SnapshotDeleteRepositoryService {
- s.repository = append(s.repository, repositories...)
- return s
-}
-
-// MasterTimeout specifies an explicit operation timeout for connection to master node.
-func (s *SnapshotDeleteRepositoryService) MasterTimeout(masterTimeout string) *SnapshotDeleteRepositoryService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *SnapshotDeleteRepositoryService) Timeout(timeout string) *SnapshotDeleteRepositoryService {
- s.timeout = timeout
- return s
-}
-
-// Pretty indicates whether the JSON response should be indented and human readable.
-func (s *SnapshotDeleteRepositoryService) Pretty(pretty bool) *SnapshotDeleteRepositoryService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *SnapshotDeleteRepositoryService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_snapshot/{repository}", map[string]string{
- "repository": strings.Join(s.repository, ","),
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *SnapshotDeleteRepositoryService) Validate() error {
- var invalid []string
- if len(s.repository) == 0 {
- invalid = append(invalid, "Repository")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *SnapshotDeleteRepositoryService) Do(ctx context.Context) (*SnapshotDeleteRepositoryResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "DELETE",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(SnapshotDeleteRepositoryResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// SnapshotDeleteRepositoryResponse is the response of SnapshotDeleteRepositoryService.Do.
-type SnapshotDeleteRepositoryResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
- Index string `json:"index,omitempty"`
-}
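A usage sketch (not part of the deleted file), assuming client and ctx; the repository names are placeholders:

resp, err := client.SnapshotDeleteRepository("my_backup", "my_old_backup").Do(ctx)
if err != nil {
    // handle error
}
if !resp.Acknowledged {
    // the master did not acknowledge the deletion
}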
diff --git a/vendor/github.com/olivere/elastic/snapshot_delete_repository_test.go b/vendor/github.com/olivere/elastic/snapshot_delete_repository_test.go
deleted file mode 100644
index aec793a60..000000000
--- a/vendor/github.com/olivere/elastic/snapshot_delete_repository_test.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-func TestSnapshotDeleteRepositoryURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Repository []string
- Expected string
- }{
- {
- []string{"repo1"},
- "/_snapshot/repo1",
- },
- {
- []string{"repo1", "repo2"},
- "/_snapshot/repo1%2Crepo2",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.SnapshotDeleteRepository(test.Repository...).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/snapshot_get_repository.go b/vendor/github.com/olivere/elastic/snapshot_get_repository.go
deleted file mode 100644
index 2d24c5e4c..000000000
--- a/vendor/github.com/olivere/elastic/snapshot_get_repository.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// SnapshotGetRepositoryService reads a snapshot repository.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html
-// for details.
-type SnapshotGetRepositoryService struct {
- client *Client
- pretty bool
- repository []string
- local *bool
- masterTimeout string
-}
-
-// NewSnapshotGetRepositoryService creates a new SnapshotGetRepositoryService.
-func NewSnapshotGetRepositoryService(client *Client) *SnapshotGetRepositoryService {
- return &SnapshotGetRepositoryService{
- client: client,
- repository: make([]string, 0),
- }
-}
-
-// Repository is the list of repository names.
-func (s *SnapshotGetRepositoryService) Repository(repositories ...string) *SnapshotGetRepositoryService {
- s.repository = append(s.repository, repositories...)
- return s
-}
-
-// Local indicates whether to return local information, i.e. not to retrieve the state from the master node (default: false).
-func (s *SnapshotGetRepositoryService) Local(local bool) *SnapshotGetRepositoryService {
- s.local = &local
- return s
-}
-
-// MasterTimeout specifies an explicit operation timeout for connection to master node.
-func (s *SnapshotGetRepositoryService) MasterTimeout(masterTimeout string) *SnapshotGetRepositoryService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Pretty indicates whether the JSON response should be indented and human readable.
-func (s *SnapshotGetRepositoryService) Pretty(pretty bool) *SnapshotGetRepositoryService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *SnapshotGetRepositoryService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
- if len(s.repository) > 0 {
- path, err = uritemplates.Expand("/_snapshot/{repository}", map[string]string{
- "repository": strings.Join(s.repository, ","),
- })
- } else {
- path = "/_snapshot"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.local != nil {
- params.Set("local", fmt.Sprintf("%v", *s.local))
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *SnapshotGetRepositoryService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *SnapshotGetRepositoryService) Do(ctx context.Context) (SnapshotGetRepositoryResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- var ret SnapshotGetRepositoryResponse
- if err := json.Unmarshal(res.Body, &ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// SnapshotGetRepositoryResponse is the response of SnapshotGetRepositoryService.Do.
-type SnapshotGetRepositoryResponse map[string]*SnapshotRepositoryMetaData
-
-// SnapshotRepositoryMetaData contains all information about
-// a single snapshot repository.
-type SnapshotRepositoryMetaData struct {
- Type string `json:"type"`
- Settings map[string]interface{} `json:"settings,omitempty"`
-}
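A usage sketch (not part of the deleted file), assuming client and ctx; calling the service without arguments lists every registered repository:

repos, err := client.SnapshotGetRepository().Do(ctx)
if err != nil {
    // handle error
}
for name, meta := range repos {
    fmt.Printf("%s: type=%s settings=%v\n", name, meta.Type, meta.Settings)
}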
diff --git a/vendor/github.com/olivere/elastic/snapshot_get_repository_test.go b/vendor/github.com/olivere/elastic/snapshot_get_repository_test.go
deleted file mode 100644
index 0dcd0bb90..000000000
--- a/vendor/github.com/olivere/elastic/snapshot_get_repository_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-func TestSnapshotGetRepositoryURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Repository []string
- Expected string
- }{
- {
- []string{},
- "/_snapshot",
- },
- {
- []string{"repo1"},
- "/_snapshot/repo1",
- },
- {
- []string{"repo1", "repo2"},
- "/_snapshot/repo1%2Crepo2",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.SnapshotGetRepository(test.Repository...).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/snapshot_verify_repository.go b/vendor/github.com/olivere/elastic/snapshot_verify_repository.go
deleted file mode 100644
index 5494ab475..000000000
--- a/vendor/github.com/olivere/elastic/snapshot_verify_repository.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// SnapshotVerifyRepositoryService verifies a snapshot repository.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html
-// for details.
-type SnapshotVerifyRepositoryService struct {
- client *Client
- pretty bool
- repository string
- masterTimeout string
- timeout string
-}
-
-// NewSnapshotVerifyRepositoryService creates a new SnapshotVerifyRepositoryService.
-func NewSnapshotVerifyRepositoryService(client *Client) *SnapshotVerifyRepositoryService {
- return &SnapshotVerifyRepositoryService{
- client: client,
- }
-}
-
-// Repository specifies the repository name.
-func (s *SnapshotVerifyRepositoryService) Repository(repository string) *SnapshotVerifyRepositoryService {
- s.repository = repository
- return s
-}
-
-// MasterTimeout is the explicit operation timeout for connection to master node.
-func (s *SnapshotVerifyRepositoryService) MasterTimeout(masterTimeout string) *SnapshotVerifyRepositoryService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *SnapshotVerifyRepositoryService) Timeout(timeout string) *SnapshotVerifyRepositoryService {
- s.timeout = timeout
- return s
-}
-
-// Pretty indicates whether the JSON response should be indented and human readable.
-func (s *SnapshotVerifyRepositoryService) Pretty(pretty bool) *SnapshotVerifyRepositoryService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *SnapshotVerifyRepositoryService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_snapshot/{repository}/_verify", map[string]string{
- "repository": s.repository,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *SnapshotVerifyRepositoryService) Validate() error {
- var invalid []string
- if s.repository == "" {
- invalid = append(invalid, "Repository")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *SnapshotVerifyRepositoryService) Do(ctx context.Context) (*SnapshotVerifyRepositoryResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(SnapshotVerifyRepositoryResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// SnapshotVerifyRepositoryResponse is the response of SnapshotVerifyRepositoryService.Do.
-type SnapshotVerifyRepositoryResponse struct {
- Nodes map[string]*SnapshotVerifyRepositoryNode `json:"nodes"`
-}
-
-// SnapshotVerifyRepositoryNode describes a node on which the snapshot repository has been verified.
-type SnapshotVerifyRepositoryNode struct {
- Name string `json:"name"`
-}
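A usage sketch (not part of the deleted file), assuming client and ctx; the repository name is a placeholder:

resp, err := client.SnapshotVerifyRepository("my_backup").Do(ctx)
if err != nil {
    // handle error
}
for nodeID, node := range resp.Nodes {
    fmt.Printf("repository verified on node %s (%s)\n", nodeID, node.Name)
}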
diff --git a/vendor/github.com/olivere/elastic/snapshot_verify_repository_test.go b/vendor/github.com/olivere/elastic/snapshot_verify_repository_test.go
deleted file mode 100644
index 9776782d2..000000000
--- a/vendor/github.com/olivere/elastic/snapshot_verify_repository_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-func TestSnapshotVerifyRepositoryURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Repository string
- Expected string
- }{
- {
- "repo",
- "/_snapshot/repo/_verify",
- },
- }
-
- for _, test := range tests {
- path, _, err := client.SnapshotVerifyRepository(test.Repository).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/sort.go b/vendor/github.com/olivere/elastic/sort.go
deleted file mode 100644
index 7e2b32183..000000000
--- a/vendor/github.com/olivere/elastic/sort.go
+++ /dev/null
@@ -1,614 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "errors"
-
-// -- Sorter --
-
-// Sorter is an interface for sorting strategies, e.g. ScoreSort or FieldSort.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-sort.html.
-type Sorter interface {
- Source() (interface{}, error)
-}
-
-// -- SortInfo --
-
-// SortInfo contains information about sorting a field.
-type SortInfo struct {
- Sorter
- Field string
- Ascending bool
- Missing interface{}
- IgnoreUnmapped *bool
- UnmappedType string
- SortMode string
- NestedFilter Query
- NestedPath string
- NestedSort *NestedSort // available in 6.1 or later
-}
-
-func (info SortInfo) Source() (interface{}, error) {
- prop := make(map[string]interface{})
- if info.Ascending {
- prop["order"] = "asc"
- } else {
- prop["order"] = "desc"
- }
- if info.Missing != nil {
- prop["missing"] = info.Missing
- }
- if info.IgnoreUnmapped != nil {
- prop["ignore_unmapped"] = *info.IgnoreUnmapped
- }
- if info.UnmappedType != "" {
- prop["unmapped_type"] = info.UnmappedType
- }
- if info.SortMode != "" {
- prop["mode"] = info.SortMode
- }
- if info.NestedFilter != nil {
- src, err := info.NestedFilter.Source()
- if err != nil {
- return nil, err
- }
- prop["nested_filter"] = src
- }
- if info.NestedPath != "" {
- prop["nested_path"] = info.NestedPath
- }
- if info.NestedSort != nil {
- src, err := info.NestedSort.Source()
- if err != nil {
- return nil, err
- }
- prop["nested"] = src
- }
- source := make(map[string]interface{})
- source[info.Field] = prop
- return source, nil
-}
-
-// -- SortByDoc --
-
-// SortByDoc sorts by the "_doc" field, as described in
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html.
-//
-// Example:
-// ss := elastic.NewSearchSource()
-// ss = ss.SortBy(elastic.SortByDoc{})
-type SortByDoc struct {
- Sorter
-}
-
-// Source returns the JSON-serializable data.
-func (s SortByDoc) Source() (interface{}, error) {
- return "_doc", nil
-}
-
-// -- ScoreSort --
-
-// ScoreSort sorts by relevancy score.
-type ScoreSort struct {
- Sorter
- ascending bool
-}
-
-// NewScoreSort creates a new ScoreSort.
-func NewScoreSort() *ScoreSort {
- return &ScoreSort{ascending: false} // Descending by default!
-}
-
-// Order defines whether to sort ascending or descending. ScoreSort sorts descending by default.
-func (s *ScoreSort) Order(ascending bool) *ScoreSort {
- s.ascending = ascending
- return s
-}
-
-// Asc sets ascending sort order.
-func (s *ScoreSort) Asc() *ScoreSort {
- s.ascending = true
- return s
-}
-
-// Desc sets descending sort order.
-func (s *ScoreSort) Desc() *ScoreSort {
- s.ascending = false
- return s
-}
-
-// Source returns the JSON-serializable data.
-func (s *ScoreSort) Source() (interface{}, error) {
- source := make(map[string]interface{})
- x := make(map[string]interface{})
- source["_score"] = x
- if s.ascending {
- x["order"] = "asc"
- } else {
- x["order"] = "desc"
- }
- return source, nil
-}
-
-// -- FieldSort --
-
-// FieldSort sorts by a given field.
-type FieldSort struct {
- Sorter
- fieldName string
- ascending bool
- missing interface{}
- unmappedType *string
- sortMode *string
- nestedFilter Query
- nestedPath *string
- nestedSort *NestedSort
-}
-
-// NewFieldSort creates a new FieldSort.
-func NewFieldSort(fieldName string) *FieldSort {
- return &FieldSort{
- fieldName: fieldName,
- ascending: true,
- }
-}
-
-// FieldName specifies the name of the field to be used for sorting.
-func (s *FieldSort) FieldName(fieldName string) *FieldSort {
- s.fieldName = fieldName
- return s
-}
-
-// Order defines whether sorting ascending (default) or descending.
-func (s *FieldSort) Order(ascending bool) *FieldSort {
- s.ascending = ascending
- return s
-}
-
-// Asc sets ascending sort order.
-func (s *FieldSort) Asc() *FieldSort {
- s.ascending = true
- return s
-}
-
-// Desc sets descending sort order.
-func (s *FieldSort) Desc() *FieldSort {
- s.ascending = false
- return s
-}
-
-// Missing sets the value to be used when a field is missing in a document.
-// You can also use "_last" or "_first" to sort missing last or first
-// respectively.
-func (s *FieldSort) Missing(missing interface{}) *FieldSort {
- s.missing = missing
- return s
-}
-
-// UnmappedType sets the type to use when the current field is not mapped
-// in an index.
-func (s *FieldSort) UnmappedType(typ string) *FieldSort {
- s.unmappedType = &typ
- return s
-}
-
-// SortMode specifies what values to pick in case a document contains
-// multiple values for the targeted sort field. Possible values are:
-// min, max, sum, and avg.
-func (s *FieldSort) SortMode(sortMode string) *FieldSort {
- s.sortMode = &sortMode
- return s
-}
-
-// NestedFilter sets a filter that nested objects should match with
-// in order to be taken into account for sorting.
-func (s *FieldSort) NestedFilter(nestedFilter Query) *FieldSort {
- s.nestedFilter = nestedFilter
- return s
-}
-
-// NestedPath is used if sorting occurs on a field that is inside a
-// nested object.
-func (s *FieldSort) NestedPath(nestedPath string) *FieldSort {
- s.nestedPath = &nestedPath
- return s
-}
-
-// NestedSort is available starting with 6.1 and will replace NestedFilter
-// and NestedPath.
-func (s *FieldSort) NestedSort(nestedSort *NestedSort) *FieldSort {
- s.nestedSort = nestedSort
- return s
-}
-
-// Source returns the JSON-serializable data.
-func (s *FieldSort) Source() (interface{}, error) {
- source := make(map[string]interface{})
- x := make(map[string]interface{})
- source[s.fieldName] = x
- if s.ascending {
- x["order"] = "asc"
- } else {
- x["order"] = "desc"
- }
- if s.missing != nil {
- x["missing"] = s.missing
- }
- if s.unmappedType != nil {
- x["unmapped_type"] = *s.unmappedType
- }
- if s.sortMode != nil {
- x["mode"] = *s.sortMode
- }
- if s.nestedFilter != nil {
- src, err := s.nestedFilter.Source()
- if err != nil {
- return nil, err
- }
- x["nested_filter"] = src
- }
- if s.nestedPath != nil {
- x["nested_path"] = *s.nestedPath
- }
- if s.nestedSort != nil {
- src, err := s.nestedSort.Source()
- if err != nil {
- return nil, err
- }
- x["nested"] = src
- }
- return source, nil
-}
-
-// -- GeoDistanceSort --
-
-// GeoDistanceSort allows for sorting by geographic distance.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-sort.html#_geo_distance_sorting.
-type GeoDistanceSort struct {
- Sorter
- fieldName string
- points []*GeoPoint
- geohashes []string
- distanceType *string
- unit string
- ascending bool
- sortMode *string
- nestedFilter Query
- nestedPath *string
- nestedSort *NestedSort
-}
-
-// NewGeoDistanceSort creates a new sorter for geo distances.
-func NewGeoDistanceSort(fieldName string) *GeoDistanceSort {
- return &GeoDistanceSort{
- fieldName: fieldName,
- ascending: true,
- }
-}
-
-// FieldName specifies the name of the (geo) field to use for sorting.
-func (s *GeoDistanceSort) FieldName(fieldName string) *GeoDistanceSort {
- s.fieldName = fieldName
- return s
-}
-
-// Order defines whether sorting ascending (default) or descending.
-func (s *GeoDistanceSort) Order(ascending bool) *GeoDistanceSort {
- s.ascending = ascending
- return s
-}
-
-// Asc sets ascending sort order.
-func (s *GeoDistanceSort) Asc() *GeoDistanceSort {
- s.ascending = true
- return s
-}
-
-// Desc sets descending sort order.
-func (s *GeoDistanceSort) Desc() *GeoDistanceSort {
- s.ascending = false
- return s
-}
-
-// Point specifies a latitude/longitude point to compute the sort distance from.
-func (s *GeoDistanceSort) Point(lat, lon float64) *GeoDistanceSort {
- s.points = append(s.points, GeoPointFromLatLon(lat, lon))
- return s
-}
-
-// Points specifies the geo point(s) to compute the sort distance from.
-func (s *GeoDistanceSort) Points(points ...*GeoPoint) *GeoDistanceSort {
- s.points = append(s.points, points...)
- return s
-}
-
-// GeoHashes specifies the geohash(es) to compute the sort distance from.
-func (s *GeoDistanceSort) GeoHashes(geohashes ...string) *GeoDistanceSort {
- s.geohashes = append(s.geohashes, geohashes...)
- return s
-}
-
-// Unit specifies the distance unit to use. It defaults to km.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/common-options.html#distance-units
-// for details.
-func (s *GeoDistanceSort) Unit(unit string) *GeoDistanceSort {
- s.unit = unit
- return s
-}
-
-// GeoDistance is an alias for DistanceType.
-func (s *GeoDistanceSort) GeoDistance(geoDistance string) *GeoDistanceSort {
- return s.DistanceType(geoDistance)
-}
-
-// DistanceType describes how to compute the distance, e.g. "arc" or "plane".
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-sort.html#geo-sorting
-// for details.
-func (s *GeoDistanceSort) DistanceType(distanceType string) *GeoDistanceSort {
- s.distanceType = &distanceType
- return s
-}
-
-// SortMode specifies what values to pick in case a document contains
-// multiple values for the targeted sort field. Possible values are:
-// min, max, sum, and avg.
-func (s *GeoDistanceSort) SortMode(sortMode string) *GeoDistanceSort {
- s.sortMode = &sortMode
- return s
-}
-
-// NestedFilter sets a filter that nested objects should match with
-// in order to be taken into account for sorting.
-func (s *GeoDistanceSort) NestedFilter(nestedFilter Query) *GeoDistanceSort {
- s.nestedFilter = nestedFilter
- return s
-}
-
-// NestedPath is used if sorting occurs on a field that is inside a
-// nested object.
-func (s *GeoDistanceSort) NestedPath(nestedPath string) *GeoDistanceSort {
- s.nestedPath = &nestedPath
- return s
-}
-
-// NestedSort is available starting with 6.1 and will replace NestedFilter
-// and NestedPath.
-func (s *GeoDistanceSort) NestedSort(nestedSort *NestedSort) *GeoDistanceSort {
- s.nestedSort = nestedSort
- return s
-}
-
-// Source returns the JSON-serializable data.
-func (s *GeoDistanceSort) Source() (interface{}, error) {
- source := make(map[string]interface{})
- x := make(map[string]interface{})
- source["_geo_distance"] = x
-
- // Points
- var ptarr []interface{}
- for _, pt := range s.points {
- ptarr = append(ptarr, pt.Source())
- }
- for _, geohash := range s.geohashes {
- ptarr = append(ptarr, geohash)
- }
- x[s.fieldName] = ptarr
-
- if s.unit != "" {
- x["unit"] = s.unit
- }
- if s.distanceType != nil {
- x["distance_type"] = *s.distanceType
- }
-
- if s.ascending {
- x["order"] = "asc"
- } else {
- x["order"] = "desc"
- }
- if s.sortMode != nil {
- x["mode"] = *s.sortMode
- }
- if s.nestedFilter != nil {
- src, err := s.nestedFilter.Source()
- if err != nil {
- return nil, err
- }
- x["nested_filter"] = src
- }
- if s.nestedPath != nil {
- x["nested_path"] = *s.nestedPath
- }
- if s.nestedSort != nil {
- src, err := s.nestedSort.Source()
- if err != nil {
- return nil, err
- }
- x["nested"] = src
- }
- return source, nil
-}
-
-// -- ScriptSort --
-
-// ScriptSort sorts by a custom script. See
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-scripting.html#modules-scripting
-// for details about scripting.
-type ScriptSort struct {
- Sorter
- script *Script
- typ string
- ascending bool
- sortMode *string
- nestedFilter Query
- nestedPath *string
- nestedSort *NestedSort
-}
-
-// NewScriptSort creates and initializes a new ScriptSort.
-// You must provide a script and a type, e.g. "string" or "number".
-func NewScriptSort(script *Script, typ string) *ScriptSort {
- return &ScriptSort{
- script: script,
- typ: typ,
- ascending: true,
- }
-}
-
-// Type sets the script type, which can be either "string" or "number".
-func (s *ScriptSort) Type(typ string) *ScriptSort {
- s.typ = typ
- return s
-}
-
-// Order defines whether sorting ascending (default) or descending.
-func (s *ScriptSort) Order(ascending bool) *ScriptSort {
- s.ascending = ascending
- return s
-}
-
-// Asc sets ascending sort order.
-func (s *ScriptSort) Asc() *ScriptSort {
- s.ascending = true
- return s
-}
-
-// Desc sets descending sort order.
-func (s *ScriptSort) Desc() *ScriptSort {
- s.ascending = false
- return s
-}
-
-// SortMode specifies what values to pick in case a document contains
-// multiple values for the targeted sort field. Possible values are:
-// min or max.
-func (s *ScriptSort) SortMode(sortMode string) *ScriptSort {
- s.sortMode = &sortMode
- return s
-}
-
-// NestedFilter sets a filter that nested objects should match with
-// in order to be taken into account for sorting.
-func (s *ScriptSort) NestedFilter(nestedFilter Query) *ScriptSort {
- s.nestedFilter = nestedFilter
- return s
-}
-
-// NestedPath is used if sorting occurs on a field that is inside a
-// nested object.
-func (s *ScriptSort) NestedPath(nestedPath string) *ScriptSort {
- s.nestedPath = &nestedPath
- return s
-}
-
-// NestedSort is available starting with 6.1 and will replace NestedFilter
-// and NestedPath.
-func (s *ScriptSort) NestedSort(nestedSort *NestedSort) *ScriptSort {
- s.nestedSort = nestedSort
- return s
-}
-
-// Source returns the JSON-serializable data.
-func (s *ScriptSort) Source() (interface{}, error) {
- if s.script == nil {
- return nil, errors.New("ScriptSort expected a script")
- }
- source := make(map[string]interface{})
- x := make(map[string]interface{})
- source["_script"] = x
-
- src, err := s.script.Source()
- if err != nil {
- return nil, err
- }
- x["script"] = src
-
- x["type"] = s.typ
-
- if s.ascending {
- x["order"] = "asc"
- } else {
- x["order"] = "desc"
- }
- if s.sortMode != nil {
- x["mode"] = *s.sortMode
- }
- if s.nestedFilter != nil {
- src, err := s.nestedFilter.Source()
- if err != nil {
- return nil, err
- }
- x["nested_filter"] = src
- }
- if s.nestedPath != nil {
- x["nested_path"] = *s.nestedPath
- }
- if s.nestedSort != nil {
- src, err := s.nestedSort.Source()
- if err != nil {
- return nil, err
- }
- x["nested"] = src
- }
- return source, nil
-}
-
-// -- NestedSort --
-
-// NestedSort is used for fields that are inside a nested object.
-// It takes a "path" argument and an optional nested filter that the
-// nested objects should match with in order to be taken into account
-// for sorting.
-//
-// NestedSort is available from 6.1 and replaces nestedFilter and nestedPath
-// in the other sorters.
-type NestedSort struct {
- Sorter
- path string
- filter Query
- nestedSort *NestedSort
-}
-
-// NewNestedSort creates a new NestedSort.
-func NewNestedSort(path string) *NestedSort {
- return &NestedSort{path: path}
-}
-
-// Filter sets the filter.
-func (s *NestedSort) Filter(filter Query) *NestedSort {
- s.filter = filter
- return s
-}
-
-// NestedSort embeds another level of nested sorting.
-func (s *NestedSort) NestedSort(nestedSort *NestedSort) *NestedSort {
- s.nestedSort = nestedSort
- return s
-}
-
-// Source returns the JSON-serializable data.
-func (s *NestedSort) Source() (interface{}, error) {
- source := make(map[string]interface{})
-
- if s.path != "" {
- source["path"] = s.path
- }
- if s.filter != nil {
- src, err := s.filter.Source()
- if err != nil {
- return nil, err
- }
- source["filter"] = src
- }
- if s.nestedSort != nil {
- src, err := s.nestedSort.Source()
- if err != nil {
- return nil, err
- }
- source["nested"] = src
- }
-
- return source, nil
-}
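A sketch (not part of the deleted file) of composing a sorter and inspecting its JSON source, in the same style as the tests that follow; the field name and filter values are placeholders:

sorter := NewFieldSort("offer.price").
    Asc().
    NestedSort(NewNestedSort("offer").Filter(NewTermQuery("offer.color", "blue")))
src, err := sorter.Source()
if err != nil {
    // handle error
}
data, _ := json.Marshal(src)
// data is roughly:
// {"offer.price":{"nested":{"filter":{"term":{"offer.color":"blue"}},"path":"offer"},"order":"asc"}}

A Sorter built this way is normally handed to a search request rather than marshaled by hand; the direct Source/Marshal round trip is shown here only because it mirrors how sort_test.go exercises these types.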
diff --git a/vendor/github.com/olivere/elastic/sort_test.go b/vendor/github.com/olivere/elastic/sort_test.go
deleted file mode 100644
index b54cbd98c..000000000
--- a/vendor/github.com/olivere/elastic/sort_test.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSortInfo(t *testing.T) {
- builder := SortInfo{Field: "grade", Ascending: false}
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"grade":{"order":"desc"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSortInfoComplex(t *testing.T) {
- builder := SortInfo{
- Field: "price",
- Ascending: false,
- Missing: "_last",
- SortMode: "avg",
- NestedFilter: NewTermQuery("product.color", "blue"),
- NestedPath: "variant",
- }
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"price":{"missing":"_last","mode":"avg","nested_filter":{"term":{"product.color":"blue"}},"nested_path":"variant","order":"desc"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestScoreSort(t *testing.T) {
-	builder := NewScoreSort()
-	if builder.ascending != false {
-		t.Error("expected score sorter to be descending by default")
-	}
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"_score":{"order":"desc"}}` // ScoreSort is "desc" by default
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestScoreSortOrderAscending(t *testing.T) {
- builder := NewScoreSort().Asc()
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"_score":{"order":"asc"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestScoreSortOrderDescending(t *testing.T) {
- builder := NewScoreSort().Desc()
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"_score":{"order":"desc"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFieldSort(t *testing.T) {
- builder := NewFieldSort("grade")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"grade":{"order":"asc"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFieldSortOrderDesc(t *testing.T) {
- builder := NewFieldSort("grade").Desc()
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"grade":{"order":"desc"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFieldSortComplex(t *testing.T) {
- builder := NewFieldSort("price").Desc().
- SortMode("avg").
- Missing("_last").
- UnmappedType("product").
- NestedFilter(NewTermQuery("product.color", "blue")).
- NestedPath("variant")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"price":{"missing":"_last","mode":"avg","nested_filter":{"term":{"product.color":"blue"}},"nested_path":"variant","order":"desc","unmapped_type":"product"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoDistanceSort(t *testing.T) {
- builder := NewGeoDistanceSort("pin.location").
- Point(-70, 40).
- Order(true).
- Unit("km").
- SortMode("min").
- GeoDistance("plane")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"_geo_distance":{"distance_type":"plane","mode":"min","order":"asc","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestGeoDistanceSortOrderDesc(t *testing.T) {
- builder := NewGeoDistanceSort("pin.location").
- Point(-70, 40).
- Unit("km").
- SortMode("min").
- GeoDistance("arc").
- Desc()
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"_geo_distance":{"distance_type":"arc","mode":"min","order":"desc","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestScriptSort(t *testing.T) {
- builder := NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number").Order(true)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"_script":{"order":"asc","script":{"params":{"factor":1.1},"source":"doc['field_name'].value * factor"},"type":"number"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestScriptSortOrderDesc(t *testing.T) {
- builder := NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number").Desc()
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"_script":{"order":"desc","script":{"params":{"factor":1.1},"source":"doc['field_name'].value * factor"},"type":"number"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestNestedSort(t *testing.T) {
- builder := NewNestedSort("offer").
- Filter(NewTermQuery("offer.color", "blue"))
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"filter":{"term":{"offer.color":"blue"}},"path":"offer"}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFieldSortWithNestedSort(t *testing.T) {
- builder := NewFieldSort("offer.price").
- Asc().
- SortMode("avg").
- NestedSort(
- NewNestedSort("offer").Filter(NewTermQuery("offer.color", "blue")),
- )
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"offer.price":{"mode":"avg","nested":{"filter":{"term":{"offer.color":"blue"}},"path":"offer"},"order":"asc"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/suggest_field.go b/vendor/github.com/olivere/elastic/suggest_field.go
deleted file mode 100644
index 8405a6f9e..000000000
--- a/vendor/github.com/olivere/elastic/suggest_field.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "errors"
-)
-
-// SuggestField can be used by the caller to specify a suggest field
-// at index time. For a detailed example, see e.g.
-// https://www.elastic.co/blog/you-complete-me.
-type SuggestField struct {
- inputs []string
- weight int
- contextQueries []SuggesterContextQuery
-}
-
-func NewSuggestField(input ...string) *SuggestField {
- return &SuggestField{
- inputs: input,
- weight: -1,
- }
-}
-
-func (f *SuggestField) Input(input ...string) *SuggestField {
- if f.inputs == nil {
- f.inputs = make([]string, 0)
- }
- f.inputs = append(f.inputs, input...)
- return f
-}
-
-func (f *SuggestField) Weight(weight int) *SuggestField {
- f.weight = weight
- return f
-}
-
-func (f *SuggestField) ContextQuery(queries ...SuggesterContextQuery) *SuggestField {
- f.contextQueries = append(f.contextQueries, queries...)
- return f
-}
-
-// MarshalJSON encodes SuggestField into JSON.
-func (f *SuggestField) MarshalJSON() ([]byte, error) {
- source := make(map[string]interface{})
-
- if f.inputs != nil {
- switch len(f.inputs) {
- case 1:
- source["input"] = f.inputs[0]
- default:
- source["input"] = f.inputs
- }
- }
-
- if f.weight >= 0 {
- source["weight"] = f.weight
- }
-
- switch len(f.contextQueries) {
- case 0:
- case 1:
- src, err := f.contextQueries[0].Source()
- if err != nil {
- return nil, err
- }
- source["contexts"] = src
- default:
- ctxq := make(map[string]interface{})
- for _, query := range f.contextQueries {
- src, err := query.Source()
- if err != nil {
- return nil, err
- }
- m, ok := src.(map[string]interface{})
- if !ok {
- return nil, errors.New("SuggesterContextQuery must be of type map[string]interface{}")
- }
- for k, v := range m {
- ctxq[k] = v
- }
- }
- source["contexts"] = ctxq
- }
-
- return json.Marshal(source)
-}
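A hedged sketch of how the deleted SuggestField is typically attached to a document at index time (document shape and values are illustrative; encoding/json invokes the MarshalJSON method above):

	doc := struct {
		Name    string        `json:"name"`
		Suggest *SuggestField `json:"suggest"`
	}{
		Name:    "Nirvana",
		Suggest: NewSuggestField("Nirvana", "Nevermind").Weight(10),
	}
	body, _ := json.Marshal(doc) // "suggest" becomes {"input":["Nirvana","Nevermind"],"weight":10}
	_ = body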
diff --git a/vendor/github.com/olivere/elastic/suggest_field_test.go b/vendor/github.com/olivere/elastic/suggest_field_test.go
deleted file mode 100644
index 426875b2f..000000000
--- a/vendor/github.com/olivere/elastic/suggest_field_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSuggestField(t *testing.T) {
- field := NewSuggestField().
- Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch").
- Weight(1).
- ContextQuery(
- NewSuggesterCategoryMapping("color").FieldName("color_field").DefaultValues("red", "green", "blue"),
- NewSuggesterGeoMapping("location").Precision("5m").Neighbors(true).DefaultLocations(GeoPointFromLatLon(52.516275, 13.377704)),
- )
- data, err := json.Marshal(field)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"contexts":{"color":{"default":["red","green","blue"],"path":"color_field","type":"category"},"location":{"default":{"lat":52.516275,"lon":13.377704},"neighbors":true,"precision":["5m"],"type":"geo"}},"input":["Welcome to Golang and Elasticsearch.","Golang and Elasticsearch"],"weight":1}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/suggester.go b/vendor/github.com/olivere/elastic/suggester.go
deleted file mode 100644
index f7dc48f90..000000000
--- a/vendor/github.com/olivere/elastic/suggester.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// Suggester represents the generic suggester interface.
-// A suggester's only purpose is to return its source
-// as a JSON-serializable object; returning a
-// map[string]interface{} will do.
-type Suggester interface {
- Name() string
- Source(includeName bool) (interface{}, error)
-}
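A minimal sketch of a type satisfying the deleted Suggester interface (purely illustrative; the concrete suggesters in this package follow the same shape):

	type rawSuggester struct {
		name string
		body map[string]interface{}
	}

	func (s rawSuggester) Name() string { return s.name }

	func (s rawSuggester) Source(includeName bool) (interface{}, error) {
		if !includeName {
			return s.body, nil
		}
		return map[string]interface{}{s.name: s.body}, nil
	}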
diff --git a/vendor/github.com/olivere/elastic/suggester_completion.go b/vendor/github.com/olivere/elastic/suggester_completion.go
deleted file mode 100644
index d2b4a326c..000000000
--- a/vendor/github.com/olivere/elastic/suggester_completion.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "errors"
-
-// CompletionSuggester is a fast suggester for e.g. type-ahead completion.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-completion.html
-// for more details.
-type CompletionSuggester struct {
- Suggester
- name string
- text string
- prefix string
- regex string
- field string
- analyzer string
- size *int
- shardSize *int
- contextQueries []SuggesterContextQuery
- payload interface{}
-
- fuzzyOptions *FuzzyCompletionSuggesterOptions
- regexOptions *RegexCompletionSuggesterOptions
- skipDuplicates *bool
-}
-
-// NewCompletionSuggester creates a new CompletionSuggester.
-func NewCompletionSuggester(name string) *CompletionSuggester {
- return &CompletionSuggester{
- name: name,
- }
-}
-
-func (q *CompletionSuggester) Name() string {
- return q.name
-}
-
-func (q *CompletionSuggester) Text(text string) *CompletionSuggester {
- q.text = text
- return q
-}
-
-func (q *CompletionSuggester) Prefix(prefix string) *CompletionSuggester {
- q.prefix = prefix
- return q
-}
-
-func (q *CompletionSuggester) PrefixWithEditDistance(prefix string, editDistance interface{}) *CompletionSuggester {
- q.prefix = prefix
- q.fuzzyOptions = NewFuzzyCompletionSuggesterOptions().EditDistance(editDistance)
- return q
-}
-
-func (q *CompletionSuggester) PrefixWithOptions(prefix string, options *FuzzyCompletionSuggesterOptions) *CompletionSuggester {
- q.prefix = prefix
- q.fuzzyOptions = options
- return q
-}
-
-func (q *CompletionSuggester) FuzzyOptions(options *FuzzyCompletionSuggesterOptions) *CompletionSuggester {
- q.fuzzyOptions = options
- return q
-}
-
-func (q *CompletionSuggester) Fuzziness(fuzziness interface{}) *CompletionSuggester {
- if q.fuzzyOptions == nil {
- q.fuzzyOptions = NewFuzzyCompletionSuggesterOptions()
- }
- q.fuzzyOptions = q.fuzzyOptions.EditDistance(fuzziness)
- return q
-}
-
-func (q *CompletionSuggester) Regex(regex string) *CompletionSuggester {
- q.regex = regex
- return q
-}
-
-func (q *CompletionSuggester) RegexWithOptions(regex string, options *RegexCompletionSuggesterOptions) *CompletionSuggester {
- q.regex = regex
- q.regexOptions = options
- return q
-}
-
-func (q *CompletionSuggester) RegexOptions(options *RegexCompletionSuggesterOptions) *CompletionSuggester {
- q.regexOptions = options
- return q
-}
-
-func (q *CompletionSuggester) SkipDuplicates(skipDuplicates bool) *CompletionSuggester {
- q.skipDuplicates = &skipDuplicates
- return q
-}
-
-func (q *CompletionSuggester) Field(field string) *CompletionSuggester {
- q.field = field
- return q
-}
-
-func (q *CompletionSuggester) Analyzer(analyzer string) *CompletionSuggester {
- q.analyzer = analyzer
- return q
-}
-
-func (q *CompletionSuggester) Size(size int) *CompletionSuggester {
- q.size = &size
- return q
-}
-
-func (q *CompletionSuggester) ShardSize(shardSize int) *CompletionSuggester {
- q.shardSize = &shardSize
- return q
-}
-
-func (q *CompletionSuggester) ContextQuery(query SuggesterContextQuery) *CompletionSuggester {
- q.contextQueries = append(q.contextQueries, query)
- return q
-}
-
-func (q *CompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *CompletionSuggester {
- q.contextQueries = append(q.contextQueries, queries...)
- return q
-}
-
-// completionSuggesterRequest is necessary because the order in which
-// the JSON elements are routed to Elasticsearch is relevant.
-// We got into trouble when using plain maps because the text element
-// needs to go before the completion element.
-type completionSuggesterRequest struct {
- Text string `json:"text,omitempty"`
- Prefix string `json:"prefix,omitempty"`
- Regex string `json:"regex,omitempty"`
- Completion interface{} `json:"completion,omitempty"`
-}
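The struct above matters because encoding/json serializes struct fields in declaration order, whereas map keys are marshaled in sorted order ("completion" would then precede "text"). A quick illustration:

	b, _ := json.Marshal(&completionSuggesterRequest{
		Prefix:     "nir",
		Completion: map[string]interface{}{"field": "suggest"},
	})
	_ = b // {"prefix":"nir","completion":{"field":"suggest"}}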
-
-// Source creates the JSON data for the completion suggester.
-func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) {
- cs := &completionSuggesterRequest{}
-
- if q.text != "" {
- cs.Text = q.text
- }
- if q.prefix != "" {
- cs.Prefix = q.prefix
- }
- if q.regex != "" {
- cs.Regex = q.regex
- }
-
- suggester := make(map[string]interface{})
- cs.Completion = suggester
-
- if q.analyzer != "" {
- suggester["analyzer"] = q.analyzer
- }
- if q.field != "" {
- suggester["field"] = q.field
- }
- if q.size != nil {
- suggester["size"] = *q.size
- }
- if q.shardSize != nil {
- suggester["shard_size"] = *q.shardSize
- }
- switch len(q.contextQueries) {
- case 0:
- case 1:
- src, err := q.contextQueries[0].Source()
- if err != nil {
- return nil, err
- }
- suggester["contexts"] = src
- default:
- ctxq := make(map[string]interface{})
- for _, query := range q.contextQueries {
- src, err := query.Source()
- if err != nil {
- return nil, err
- }
- // Merge the dictionary into ctxq
- m, ok := src.(map[string]interface{})
- if !ok {
- return nil, errors.New("elastic: context query is not a map")
- }
- for k, v := range m {
- ctxq[k] = v
- }
- }
- suggester["contexts"] = ctxq
- }
-
- // Fuzzy options
- if q.fuzzyOptions != nil {
- src, err := q.fuzzyOptions.Source()
- if err != nil {
- return nil, err
- }
- suggester["fuzzy"] = src
- }
-
- // Regex options
- if q.regexOptions != nil {
- src, err := q.regexOptions.Source()
- if err != nil {
- return nil, err
- }
- suggester["regex"] = src
- }
-
- if q.skipDuplicates != nil {
- suggester["skip_duplicates"] = *q.skipDuplicates
- }
-
- // TODO(oe) Add completion-suggester specific parameters here
-
- if !includeName {
- return cs, nil
- }
-
- source := make(map[string]interface{})
- source[q.name] = cs
- return source, nil
-}
-
-// -- Fuzzy options --
-
-// FuzzyCompletionSuggesterOptions represents the options for fuzzy completion suggester.
-type FuzzyCompletionSuggesterOptions struct {
- editDistance interface{}
- transpositions *bool
- minLength *int
- prefixLength *int
- unicodeAware *bool
- maxDeterminizedStates *int
-}
-
-// NewFuzzyCompletionSuggesterOptions initializes a new FuzzyCompletionSuggesterOptions instance.
-func NewFuzzyCompletionSuggesterOptions() *FuzzyCompletionSuggesterOptions {
- return &FuzzyCompletionSuggesterOptions{}
-}
-
-// EditDistance specifies the maximum number of edits, e.g. a number like "1" or "2"
-// or a string like "0..2" or ">5". See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/common-options.html#fuzziness
-// for details.
-func (o *FuzzyCompletionSuggesterOptions) EditDistance(editDistance interface{}) *FuzzyCompletionSuggesterOptions {
- o.editDistance = editDistance
- return o
-}
-
-// Transpositions, if set to true, counts a transposition as a single change instead of two (defaults to true).
-func (o *FuzzyCompletionSuggesterOptions) Transpositions(transpositions bool) *FuzzyCompletionSuggesterOptions {
- o.transpositions = &transpositions
- return o
-}
-
-// MinLength represents the minimum length of the input before fuzzy suggestions are returned (defaults to 3).
-func (o *FuzzyCompletionSuggesterOptions) MinLength(minLength int) *FuzzyCompletionSuggesterOptions {
- o.minLength = &minLength
- return o
-}
-
-// PrefixLength represents the length of the leading input characters that are
-// not checked for fuzzy alternatives (defaults to 1).
-func (o *FuzzyCompletionSuggesterOptions) PrefixLength(prefixLength int) *FuzzyCompletionSuggesterOptions {
- o.prefixLength = &prefixLength
- return o
-}
-
-// UnicodeAware, if set to true, measures all lengths and distances (fuzzy edit
-// distance, transpositions, and lengths) in Unicode code points instead of
-// bytes. This is slightly slower than raw bytes, so it defaults to false.
-func (o *FuzzyCompletionSuggesterOptions) UnicodeAware(unicodeAware bool) *FuzzyCompletionSuggesterOptions {
- o.unicodeAware = &unicodeAware
- return o
-}
-
-// MaxDeterminizedStates is currently undocumented in Elasticsearch. It represents
-// the maximum automaton states allowed for fuzzy expansion.
-func (o *FuzzyCompletionSuggesterOptions) MaxDeterminizedStates(max int) *FuzzyCompletionSuggesterOptions {
- o.maxDeterminizedStates = &max
- return o
-}
-
-// Source creates the JSON data.
-func (o *FuzzyCompletionSuggesterOptions) Source() (interface{}, error) {
- out := make(map[string]interface{})
-
- if o.editDistance != nil {
- out["fuzziness"] = o.editDistance
- }
- if o.transpositions != nil {
- out["transpositions"] = *o.transpositions
- }
- if o.minLength != nil {
- out["min_length"] = *o.minLength
- }
- if o.prefixLength != nil {
- out["prefix_length"] = *o.prefixLength
- }
- if o.unicodeAware != nil {
- out["unicode_aware"] = *o.unicodeAware
- }
- if o.maxDeterminizedStates != nil {
- out["max_determinized_states"] = *o.maxDeterminizedStates
- }
-
- return out, nil
-}
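A sketch combining the fuzzy options above with a completion suggester ("AUTO" is one of the fuzziness values accepted by Elasticsearch; suggester and field names are illustrative):

	s := NewCompletionSuggester("song-suggest").
		Prefix("nor").
		Field("suggest").
		FuzzyOptions(NewFuzzyCompletionSuggesterOptions().
			EditDistance("AUTO").
			Transpositions(true).
			MinLength(3).
			PrefixLength(1))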
-
-// -- Regex options --
-
-// RegexCompletionSuggesterOptions represents the options for regex completion suggester.
-type RegexCompletionSuggesterOptions struct {
- flags interface{} // string or int
- maxDeterminizedStates *int
-}
-
-// NewRegexCompletionSuggesterOptions initializes a new RegexCompletionSuggesterOptions instance.
-func NewRegexCompletionSuggesterOptions() *RegexCompletionSuggesterOptions {
- return &RegexCompletionSuggesterOptions{}
-}
-
-// Flags represents internal regex flags. See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-suggesters-completion.html#regex
-// for details.
-func (o *RegexCompletionSuggesterOptions) Flags(flags interface{}) *RegexCompletionSuggesterOptions {
- o.flags = flags
- return o
-}
-
-// MaxDeterminizedStates represents the maximum automaton states allowed for regex expansion.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-suggesters-completion.html#regex
-// for details.
-func (o *RegexCompletionSuggesterOptions) MaxDeterminizedStates(max int) *RegexCompletionSuggesterOptions {
- o.maxDeterminizedStates = &max
- return o
-}
-
-// Source creates the JSON data.
-func (o *RegexCompletionSuggesterOptions) Source() (interface{}, error) {
- out := make(map[string]interface{})
-
- if o.flags != nil {
- out["flags"] = o.flags
- }
- if o.maxDeterminizedStates != nil {
- out["max_determinized_states"] = *o.maxDeterminizedStates
- }
-
- return out, nil
-}
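And the regex counterpart, using the options type above ("ANYSTRING" is one of the regex flags documented by Elasticsearch; names are illustrative):

	s := NewCompletionSuggester("song-suggest").
		Field("suggest").
		RegexWithOptions("n[ever|i]r",
			NewRegexCompletionSuggesterOptions().
				Flags("ANYSTRING").
				MaxDeterminizedStates(10000))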
diff --git a/vendor/github.com/olivere/elastic/suggester_completion_test.go b/vendor/github.com/olivere/elastic/suggester_completion_test.go
deleted file mode 100644
index adbf58657..000000000
--- a/vendor/github.com/olivere/elastic/suggester_completion_test.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestCompletionSuggesterSource(t *testing.T) {
- s := NewCompletionSuggester("song-suggest").
- Text("n").
- Field("suggest")
- src, err := s.Source(true)
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCompletionSuggesterPrefixSource(t *testing.T) {
- s := NewCompletionSuggester("song-suggest").
- Prefix("nir").
- Field("suggest")
- src, err := s.Source(true)
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"song-suggest":{"prefix":"nir","completion":{"field":"suggest"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCompletionSuggesterPrefixWithFuzzySource(t *testing.T) {
- s := NewCompletionSuggester("song-suggest").
- Prefix("nor").
- Field("suggest").
- FuzzyOptions(NewFuzzyCompletionSuggesterOptions().EditDistance(2))
- src, err := s.Source(true)
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"song-suggest":{"prefix":"nor","completion":{"field":"suggest","fuzzy":{"fuzziness":2}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCompletionSuggesterRegexSource(t *testing.T) {
- s := NewCompletionSuggester("song-suggest").
- Regex("n[ever|i]r").
- Field("suggest")
- src, err := s.Source(true)
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"song-suggest":{"regex":"n[ever|i]r","completion":{"field":"suggest"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCompletionSuggesterSourceWithMultipleContexts(t *testing.T) {
- s := NewCompletionSuggester("song-suggest").
- Text("n").
- Field("suggest").
- ContextQueries(
- NewSuggesterCategoryQuery("artist", "Sting"),
- NewSuggesterCategoryQuery("label", "BMG"),
- )
- src, err := s.Source(true)
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"song-suggest":{"text":"n","completion":{"contexts":{"artist":[{"context":"Sting"}],"label":[{"context":"BMG"}]},"field":"suggest"}}}`
- if got != expected {
- t.Errorf("expected %s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/suggester_context.go b/vendor/github.com/olivere/elastic/suggester_context.go
deleted file mode 100644
index 12877c1a6..000000000
--- a/vendor/github.com/olivere/elastic/suggester_context.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "errors"
-
-// SuggesterContextQuery is used to define context information within
-// a suggestion request.
-type SuggesterContextQuery interface {
- Source() (interface{}, error)
-}
-
-// ContextSuggester is a fast suggester for e.g. type-ahead completion that supports filtering and boosting based on contexts.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html
-// for more details.
-type ContextSuggester struct {
- Suggester
- name string
- prefix string
- field string
- size *int
- contextQueries []SuggesterContextQuery
-}
-
-// NewContextSuggester creates a new ContextSuggester.
-func NewContextSuggester(name string) *ContextSuggester {
- return &ContextSuggester{
- name: name,
- contextQueries: make([]SuggesterContextQuery, 0),
- }
-}
-
-func (q *ContextSuggester) Name() string {
- return q.name
-}
-
-func (q *ContextSuggester) Prefix(prefix string) *ContextSuggester {
- q.prefix = prefix
- return q
-}
-
-func (q *ContextSuggester) Field(field string) *ContextSuggester {
- q.field = field
- return q
-}
-
-func (q *ContextSuggester) Size(size int) *ContextSuggester {
- q.size = &size
- return q
-}
-
-func (q *ContextSuggester) ContextQuery(query SuggesterContextQuery) *ContextSuggester {
- q.contextQueries = append(q.contextQueries, query)
- return q
-}
-
-func (q *ContextSuggester) ContextQueries(queries ...SuggesterContextQuery) *ContextSuggester {
- q.contextQueries = append(q.contextQueries, queries...)
- return q
-}
-
-// contextSuggesterRequest is necessary because the order in which
-// the JSON elements are routed to Elasticsearch is relevant.
-// We got into trouble when using plain maps because the prefix element
-// needs to go before the completion element.
-type contextSuggesterRequest struct {
- Prefix string `json:"prefix"`
- Completion interface{} `json:"completion"`
-}
-
-// Source creates the source for the context suggester.
-func (q *ContextSuggester) Source(includeName bool) (interface{}, error) {
- cs := &contextSuggesterRequest{}
-
- if q.prefix != "" {
- cs.Prefix = q.prefix
- }
-
- suggester := make(map[string]interface{})
- cs.Completion = suggester
-
- if q.field != "" {
- suggester["field"] = q.field
- }
- if q.size != nil {
- suggester["size"] = *q.size
- }
- switch len(q.contextQueries) {
- case 0:
- case 1:
- src, err := q.contextQueries[0].Source()
- if err != nil {
- return nil, err
- }
- suggester["contexts"] = src
- default:
- ctxq := make(map[string]interface{})
- for _, query := range q.contextQueries {
- src, err := query.Source()
- if err != nil {
- return nil, err
- }
- // Merge the dictionary into ctxq
- m, ok := src.(map[string]interface{})
- if !ok {
- return nil, errors.New("elastic: context query is not a map")
- }
- for k, v := range m {
- ctxq[k] = v
- }
- }
- suggester["contexts"] = ctxq
- }
-
- if !includeName {
- return cs, nil
- }
-
- source := make(map[string]interface{})
- source[q.name] = cs
- return source, nil
-}
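A sketch of the deleted ContextSuggester in use, together with the JSON it produces (mirrors the test file further below):

	s := NewContextSuggester("place_suggestion").
		Prefix("tim").
		Field("suggest").
		ContextQueries(NewSuggesterCategoryQuery("place_type", "cafe"))
	src, _ := s.Source(true)
	_ = src // {"place_suggestion":{"prefix":"tim","completion":{"contexts":{"place_type":[{"context":"cafe"}]},"field":"suggest"}}}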
diff --git a/vendor/github.com/olivere/elastic/suggester_context_category.go b/vendor/github.com/olivere/elastic/suggester_context_category.go
deleted file mode 100644
index 9c50651fa..000000000
--- a/vendor/github.com/olivere/elastic/suggester_context_category.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// -- SuggesterCategoryMapping --
-
-// SuggesterCategoryMapping provides a mapping for a category context in a suggester.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html#_category_mapping.
-type SuggesterCategoryMapping struct {
- name string
- fieldName string
- defaultValues []string
-}
-
-// NewSuggesterCategoryMapping creates a new SuggesterCategoryMapping.
-func NewSuggesterCategoryMapping(name string) *SuggesterCategoryMapping {
- return &SuggesterCategoryMapping{
- name: name,
- defaultValues: make([]string, 0),
- }
-}
-
-func (q *SuggesterCategoryMapping) DefaultValues(values ...string) *SuggesterCategoryMapping {
- q.defaultValues = append(q.defaultValues, values...)
- return q
-}
-
-func (q *SuggesterCategoryMapping) FieldName(fieldName string) *SuggesterCategoryMapping {
- q.fieldName = fieldName
- return q
-}
-
-// Source returns a map that will be used to serialize the context query as JSON.
-func (q *SuggesterCategoryMapping) Source() (interface{}, error) {
- source := make(map[string]interface{})
-
- x := make(map[string]interface{})
- source[q.name] = x
-
- x["type"] = "category"
-
- switch len(q.defaultValues) {
- case 0:
- x["default"] = q.defaultValues
- case 1:
- x["default"] = q.defaultValues[0]
- default:
- x["default"] = q.defaultValues
- }
-
- if q.fieldName != "" {
- x["path"] = q.fieldName
- }
- return source, nil
-}
-
-// -- SuggesterCategoryQuery --
-
-// SuggesterCategoryQuery provides querying a category context in a suggester.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html#_category_query.
-type SuggesterCategoryQuery struct {
- name string
- values map[string]*int
-}
-
-// NewSuggesterCategoryQuery creates a new SuggesterCategoryQuery.
-func NewSuggesterCategoryQuery(name string, values ...string) *SuggesterCategoryQuery {
- q := &SuggesterCategoryQuery{
- name: name,
- values: make(map[string]*int),
- }
-
- if len(values) > 0 {
- q.Values(values...)
- }
- return q
-}
-
-func (q *SuggesterCategoryQuery) Value(val string) *SuggesterCategoryQuery {
- q.values[val] = nil
- return q
-}
-
-func (q *SuggesterCategoryQuery) ValueWithBoost(val string, boost int) *SuggesterCategoryQuery {
- q.values[val] = &boost
- return q
-}
-
-func (q *SuggesterCategoryQuery) Values(values ...string) *SuggesterCategoryQuery {
- for _, val := range values {
- q.values[val] = nil
- }
- return q
-}
-
-// Source returns a map that will be used to serialize the context query as JSON.
-func (q *SuggesterCategoryQuery) Source() (interface{}, error) {
- source := make(map[string]interface{})
-
- switch len(q.values) {
- case 0:
- source[q.name] = make([]string, 0)
- default:
- contexts := make([]interface{}, 0)
- for val, boost := range q.values {
- context := make(map[string]interface{})
- context["context"] = val
- if boost != nil {
- context["boost"] = *boost
- }
- contexts = append(contexts, context)
- }
- source[q.name] = contexts
- }
-
- return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/suggester_context_category_test.go b/vendor/github.com/olivere/elastic/suggester_context_category_test.go
deleted file mode 100644
index 46acd725e..000000000
--- a/vendor/github.com/olivere/elastic/suggester_context_category_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSuggesterCategoryMapping(t *testing.T) {
- q := NewSuggesterCategoryMapping("color").DefaultValues("red")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"color":{"default":"red","type":"category"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSuggesterCategoryMappingWithTwoDefaultValues(t *testing.T) {
- q := NewSuggesterCategoryMapping("color").DefaultValues("red", "orange")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"color":{"default":["red","orange"],"type":"category"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSuggesterCategoryMappingWithFieldName(t *testing.T) {
- q := NewSuggesterCategoryMapping("color").
- DefaultValues("red", "orange").
- FieldName("color_field")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"color":{"default":["red","orange"],"path":"color_field","type":"category"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSuggesterCategoryQuery(t *testing.T) {
- q := NewSuggesterCategoryQuery("color", "red")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"color":[{"context":"red"}]}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSuggesterCategoryQueryWithTwoValues(t *testing.T) {
- q := NewSuggesterCategoryQuery("color", "red", "yellow")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expectedOutcomes := []string{
- `{"color":[{"context":"red"},{"context":"yellow"}]}`,
- `{"color":[{"context":"yellow"},{"context":"red"}]}`,
- }
- var match bool
- for _, expected := range expectedOutcomes {
- if got == expected {
- match = true
- break
- }
- }
- if !match {
- t.Errorf("expected any of %v\n,got:\n%s", expectedOutcomes, got)
- }
-}
-
-func TestSuggesterCategoryQueryWithBoost(t *testing.T) {
- q := NewSuggesterCategoryQuery("color", "red")
- q.ValueWithBoost("yellow", 4)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expectedOutcomes := []string{
- `{"color":[{"context":"red"},{"boost":4,"context":"yellow"}]}`,
- `{"color":[{"boost":4,"context":"yellow"},{"context":"red"}]}`,
- }
- var match bool
- for _, expected := range expectedOutcomes {
- if got == expected {
- match = true
- break
- }
- }
- if !match {
- t.Errorf("expected any of %v\n,got:\n%v", expectedOutcomes, got)
- }
-}
-
-func TestSuggesterCategoryQueryWithoutBoost(t *testing.T) {
- q := NewSuggesterCategoryQuery("color", "red")
- q.Value("yellow")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expectedOutcomes := []string{
- `{"color":[{"context":"red"},{"context":"yellow"}]}`,
- `{"color":[{"context":"yellow"},{"context":"red"}]}`,
- }
- var match bool
- for _, expected := range expectedOutcomes {
- if got == expected {
- match = true
- break
- }
- }
- if !match {
- t.Errorf("expected any of %v\n,got:\n%s", expectedOutcomes, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/suggester_context_geo.go b/vendor/github.com/olivere/elastic/suggester_context_geo.go
deleted file mode 100644
index 3fea63feb..000000000
--- a/vendor/github.com/olivere/elastic/suggester_context_geo.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// -- SuggesterGeoMapping --
-
-// SuggesterGeoMapping provides a mapping for a geolocation context in a suggester.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html#_geo_location_mapping.
-type SuggesterGeoMapping struct {
- name string
- defaultLocations []*GeoPoint
- precision []string
- neighbors *bool
- fieldName string
-}
-
-// NewSuggesterGeoMapping creates a new SuggesterGeoMapping.
-func NewSuggesterGeoMapping(name string) *SuggesterGeoMapping {
- return &SuggesterGeoMapping{
- name: name,
- }
-}
-
-func (q *SuggesterGeoMapping) DefaultLocations(locations ...*GeoPoint) *SuggesterGeoMapping {
- q.defaultLocations = append(q.defaultLocations, locations...)
- return q
-}
-
-func (q *SuggesterGeoMapping) Precision(precision ...string) *SuggesterGeoMapping {
- q.precision = append(q.precision, precision...)
- return q
-}
-
-func (q *SuggesterGeoMapping) Neighbors(neighbors bool) *SuggesterGeoMapping {
- q.neighbors = &neighbors
- return q
-}
-
-func (q *SuggesterGeoMapping) FieldName(fieldName string) *SuggesterGeoMapping {
- q.fieldName = fieldName
- return q
-}
-
-// Source returns a map that will be used to serialize the context query as JSON.
-func (q *SuggesterGeoMapping) Source() (interface{}, error) {
- source := make(map[string]interface{})
-
- x := make(map[string]interface{})
- source[q.name] = x
-
- x["type"] = "geo"
-
- if len(q.precision) > 0 {
- x["precision"] = q.precision
- }
- if q.neighbors != nil {
- x["neighbors"] = *q.neighbors
- }
-
- switch len(q.defaultLocations) {
- case 0:
- case 1:
- x["default"] = q.defaultLocations[0].Source()
- default:
- var arr []interface{}
- for _, p := range q.defaultLocations {
- arr = append(arr, p.Source())
- }
- x["default"] = arr
- }
-
- if q.fieldName != "" {
- x["path"] = q.fieldName
- }
- return source, nil
-}
-
-// -- SuggesterGeoQuery --
-
-// SuggesterGeoQuery provides querying a geolocation context in a suggester.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html#_geo_location_query
-type SuggesterGeoQuery struct {
- name string
- location *GeoPoint
- precision []string
-}
-
-// NewSuggesterGeoQuery creates a new SuggesterGeoQuery.
-func NewSuggesterGeoQuery(name string, location *GeoPoint) *SuggesterGeoQuery {
- return &SuggesterGeoQuery{
- name: name,
- location: location,
- precision: make([]string, 0),
- }
-}
-
-func (q *SuggesterGeoQuery) Precision(precision ...string) *SuggesterGeoQuery {
- q.precision = append(q.precision, precision...)
- return q
-}
-
-// Source returns a map that will be used to serialize the context query as JSON.
-func (q *SuggesterGeoQuery) Source() (interface{}, error) {
- source := make(map[string]interface{})
-
- if len(q.precision) == 0 {
- if q.location != nil {
- source[q.name] = q.location.Source()
- }
- } else {
- x := make(map[string]interface{})
- source[q.name] = x
-
- if q.location != nil {
- x["value"] = q.location.Source()
- }
-
- switch len(q.precision) {
- case 0:
- case 1:
- x["precision"] = q.precision[0]
- default:
- x["precision"] = q.precision
- }
- }
-
- return source, nil
-}
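A sketch of a geo context query feeding a completion suggester (coordinates and names are illustrative):

	s := NewCompletionSuggester("place_suggestion").
		Prefix("tim").
		Field("suggest").
		ContextQuery(NewSuggesterGeoQuery("location", GeoPointFromLatLon(52.52, 13.40)).
			Precision("10km"))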
diff --git a/vendor/github.com/olivere/elastic/suggester_context_geo_test.go b/vendor/github.com/olivere/elastic/suggester_context_geo_test.go
deleted file mode 100644
index b1ab2f495..000000000
--- a/vendor/github.com/olivere/elastic/suggester_context_geo_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSuggesterGeoMapping(t *testing.T) {
- q := NewSuggesterGeoMapping("location").
- Precision("1km", "5m").
- Neighbors(true).
- FieldName("pin").
- DefaultLocations(GeoPointFromLatLon(0.0, 0.0))
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSuggesterGeoQuery(t *testing.T) {
- q := NewSuggesterGeoQuery("location", GeoPointFromLatLon(11.5, 62.71)).Precision("1km")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"location":{"precision":"1km","value":{"lat":11.5,"lon":62.71}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/suggester_context_test.go b/vendor/github.com/olivere/elastic/suggester_context_test.go
deleted file mode 100644
index 045ccb2f4..000000000
--- a/vendor/github.com/olivere/elastic/suggester_context_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestContextSuggesterSource(t *testing.T) {
- s := NewContextSuggester("place_suggestion").
- Prefix("tim").
- Field("suggest")
- src, err := s.Source(true)
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"place_suggestion":{"prefix":"tim","completion":{"field":"suggest"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestContextSuggesterSourceWithMultipleContexts(t *testing.T) {
- s := NewContextSuggester("place_suggestion").
- Prefix("tim").
- Field("suggest").
- ContextQueries(
- NewSuggesterCategoryQuery("place_type", "cafe", "restaurants"),
- )
- src, err := s.Source(true)
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
-	// Due to Go's randomized map iteration order, two different outcomes are valid here
- expected := `{"place_suggestion":{"prefix":"tim","completion":{"contexts":{"place_type":[{"context":"cafe"},{"context":"restaurants"}]},"field":"suggest"}}}`
- if got != expected {
- expected := `{"place_suggestion":{"prefix":"tim","completion":{"contexts":{"place_type":[{"context":"restaurants"},{"context":"cafe"}]},"field":"suggest"}}}`
- if got != expected {
- t.Errorf("expected %s\n,got:\n%s", expected, got)
- }
- }
-}
diff --git a/vendor/github.com/olivere/elastic/suggester_phrase.go b/vendor/github.com/olivere/elastic/suggester_phrase.go
deleted file mode 100644
index 2f6b6a326..000000000
--- a/vendor/github.com/olivere/elastic/suggester_phrase.go
+++ /dev/null
@@ -1,546 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// PhraseSuggester provides an API to access word alternatives
-// on a per token basis within a certain string distance.
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html.
-type PhraseSuggester struct {
- Suggester
- name string
- text string
- field string
- analyzer string
- size *int
- shardSize *int
- contextQueries []SuggesterContextQuery
-
- // fields specific to a phrase suggester
- maxErrors *float64
- separator *string
- realWordErrorLikelihood *float64
- confidence *float64
- generators map[string][]CandidateGenerator
- gramSize *int
- smoothingModel SmoothingModel
- forceUnigrams *bool
- tokenLimit *int
- preTag, postTag *string
- collateQuery *string
- collatePreference *string
- collateParams map[string]interface{}
- collatePrune *bool
-}
-
-// NewPhraseSuggester creates a new PhraseSuggester.
-func NewPhraseSuggester(name string) *PhraseSuggester {
- return &PhraseSuggester{
- name: name,
- collateParams: make(map[string]interface{}),
- }
-}
-
-func (q *PhraseSuggester) Name() string {
- return q.name
-}
-
-func (q *PhraseSuggester) Text(text string) *PhraseSuggester {
- q.text = text
- return q
-}
-
-func (q *PhraseSuggester) Field(field string) *PhraseSuggester {
- q.field = field
- return q
-}
-
-func (q *PhraseSuggester) Analyzer(analyzer string) *PhraseSuggester {
- q.analyzer = analyzer
- return q
-}
-
-func (q *PhraseSuggester) Size(size int) *PhraseSuggester {
- q.size = &size
- return q
-}
-
-func (q *PhraseSuggester) ShardSize(shardSize int) *PhraseSuggester {
- q.shardSize = &shardSize
- return q
-}
-
-func (q *PhraseSuggester) ContextQuery(query SuggesterContextQuery) *PhraseSuggester {
- q.contextQueries = append(q.contextQueries, query)
- return q
-}
-
-func (q *PhraseSuggester) ContextQueries(queries ...SuggesterContextQuery) *PhraseSuggester {
- q.contextQueries = append(q.contextQueries, queries...)
- return q
-}
-
-func (q *PhraseSuggester) GramSize(gramSize int) *PhraseSuggester {
- if gramSize >= 1 {
- q.gramSize = &gramSize
- }
- return q
-}
-
-func (q *PhraseSuggester) MaxErrors(maxErrors float64) *PhraseSuggester {
- q.maxErrors = &maxErrors
- return q
-}
-
-func (q *PhraseSuggester) Separator(separator string) *PhraseSuggester {
- q.separator = &separator
- return q
-}
-
-func (q *PhraseSuggester) RealWordErrorLikelihood(realWordErrorLikelihood float64) *PhraseSuggester {
- q.realWordErrorLikelihood = &realWordErrorLikelihood
- return q
-}
-
-func (q *PhraseSuggester) Confidence(confidence float64) *PhraseSuggester {
- q.confidence = &confidence
- return q
-}
-
-func (q *PhraseSuggester) CandidateGenerator(generator CandidateGenerator) *PhraseSuggester {
- if q.generators == nil {
- q.generators = make(map[string][]CandidateGenerator)
- }
- typ := generator.Type()
- if _, found := q.generators[typ]; !found {
- q.generators[typ] = make([]CandidateGenerator, 0)
- }
- q.generators[typ] = append(q.generators[typ], generator)
- return q
-}
-
-func (q *PhraseSuggester) CandidateGenerators(generators ...CandidateGenerator) *PhraseSuggester {
- for _, g := range generators {
- q = q.CandidateGenerator(g)
- }
- return q
-}
-
-func (q *PhraseSuggester) ClearCandidateGenerator() *PhraseSuggester {
- q.generators = nil
- return q
-}
-
-func (q *PhraseSuggester) ForceUnigrams(forceUnigrams bool) *PhraseSuggester {
- q.forceUnigrams = &forceUnigrams
- return q
-}
-
-func (q *PhraseSuggester) SmoothingModel(smoothingModel SmoothingModel) *PhraseSuggester {
- q.smoothingModel = smoothingModel
- return q
-}
-
-func (q *PhraseSuggester) TokenLimit(tokenLimit int) *PhraseSuggester {
- q.tokenLimit = &tokenLimit
- return q
-}
-
-func (q *PhraseSuggester) Highlight(preTag, postTag string) *PhraseSuggester {
- q.preTag = &preTag
- q.postTag = &postTag
- return q
-}
-
-func (q *PhraseSuggester) CollateQuery(collateQuery string) *PhraseSuggester {
- q.collateQuery = &collateQuery
- return q
-}
-
-func (q *PhraseSuggester) CollatePreference(collatePreference string) *PhraseSuggester {
- q.collatePreference = &collatePreference
- return q
-}
-
-func (q *PhraseSuggester) CollateParams(collateParams map[string]interface{}) *PhraseSuggester {
- q.collateParams = collateParams
- return q
-}
-
-func (q *PhraseSuggester) CollatePrune(collatePrune bool) *PhraseSuggester {
- q.collatePrune = &collatePrune
- return q
-}
-
-// phraseSuggesterRequest is necessary because the order in which
-// the JSON elements are routed to Elasticsearch is relevant.
-// We got into trouble when using plain maps because the text element
-// needs to go before the phrase element.
-type phraseSuggesterRequest struct {
- Text string `json:"text"`
- Phrase interface{} `json:"phrase"`
-}
-
-// Source generates the source for the phrase suggester.
-func (q *PhraseSuggester) Source(includeName bool) (interface{}, error) {
- ps := &phraseSuggesterRequest{}
-
- if q.text != "" {
- ps.Text = q.text
- }
-
- suggester := make(map[string]interface{})
- ps.Phrase = suggester
-
- if q.analyzer != "" {
- suggester["analyzer"] = q.analyzer
- }
- if q.field != "" {
- suggester["field"] = q.field
- }
- if q.size != nil {
- suggester["size"] = *q.size
- }
- if q.shardSize != nil {
- suggester["shard_size"] = *q.shardSize
- }
- switch len(q.contextQueries) {
- case 0:
- case 1:
- src, err := q.contextQueries[0].Source()
- if err != nil {
- return nil, err
- }
- suggester["contexts"] = src
- default:
- var ctxq []interface{}
- for _, query := range q.contextQueries {
- src, err := query.Source()
- if err != nil {
- return nil, err
- }
- ctxq = append(ctxq, src)
- }
- suggester["contexts"] = ctxq
- }
-
-	// Phrase-specific parameters
- if q.realWordErrorLikelihood != nil {
- suggester["real_word_error_likelihood"] = *q.realWordErrorLikelihood
- }
- if q.confidence != nil {
- suggester["confidence"] = *q.confidence
- }
- if q.separator != nil {
- suggester["separator"] = *q.separator
- }
- if q.maxErrors != nil {
- suggester["max_errors"] = *q.maxErrors
- }
- if q.gramSize != nil {
- suggester["gram_size"] = *q.gramSize
- }
- if q.forceUnigrams != nil {
- suggester["force_unigrams"] = *q.forceUnigrams
- }
- if q.tokenLimit != nil {
- suggester["token_limit"] = *q.tokenLimit
- }
-	if len(q.generators) > 0 {
- for typ, generators := range q.generators {
- var arr []interface{}
- for _, g := range generators {
- src, err := g.Source()
- if err != nil {
- return nil, err
- }
- arr = append(arr, src)
- }
- suggester[typ] = arr
- }
- }
- if q.smoothingModel != nil {
- src, err := q.smoothingModel.Source()
- if err != nil {
- return nil, err
- }
- x := make(map[string]interface{})
- x[q.smoothingModel.Type()] = src
- suggester["smoothing"] = x
- }
- if q.preTag != nil {
- hl := make(map[string]string)
- hl["pre_tag"] = *q.preTag
- if q.postTag != nil {
- hl["post_tag"] = *q.postTag
- }
- suggester["highlight"] = hl
- }
- if q.collateQuery != nil {
- collate := make(map[string]interface{})
- suggester["collate"] = collate
- if q.collateQuery != nil {
- collate["query"] = *q.collateQuery
- }
- if q.collatePreference != nil {
- collate["preference"] = *q.collatePreference
- }
- if len(q.collateParams) > 0 {
- collate["params"] = q.collateParams
- }
- if q.collatePrune != nil {
- collate["prune"] = *q.collatePrune
- }
- }
-
- if !includeName {
- return ps, nil
- }
-
- source := make(map[string]interface{})
- source[q.name] = ps
- return source, nil
-}
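A hedged end-to-end sketch of wiring the phrase suggester into a search; it assumes the SearchService.Suggester and SearchResult.Suggest APIs defined elsewhere in this package, plus a configured *Client and the standard context package (index and field names are illustrative):

	res, err := client.Search("articles").
		Suggester(NewPhraseSuggester("did_you_mean").
			Text("noble prize").
			Field("title.trigram").
			Size(1)).
		Do(context.Background())
	if err == nil {
		for _, sug := range res.Suggest["did_you_mean"] {
			for _, opt := range sug.Options {
				_ = opt.Text // suggested correction
			}
		}
	}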
-
-// -- Smoothing models --
-
-type SmoothingModel interface {
- Type() string
- Source() (interface{}, error)
-}
-
-// StupidBackoffSmoothingModel implements a stupid backoff smoothing model.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html#_smoothing_models
-// for details about smoothing models.
-type StupidBackoffSmoothingModel struct {
- discount float64
-}
-
-func NewStupidBackoffSmoothingModel(discount float64) *StupidBackoffSmoothingModel {
- return &StupidBackoffSmoothingModel{
- discount: discount,
- }
-}
-
-func (sm *StupidBackoffSmoothingModel) Type() string {
- return "stupid_backoff"
-}
-
-func (sm *StupidBackoffSmoothingModel) Source() (interface{}, error) {
- source := make(map[string]interface{})
- source["discount"] = sm.discount
- return source, nil
-}
-
-// --
-
-// LaplaceSmoothingModel implements a Laplace smoothing model.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html#_smoothing_models
-// for details about smoothing models.
-type LaplaceSmoothingModel struct {
- alpha float64
-}
-
-func NewLaplaceSmoothingModel(alpha float64) *LaplaceSmoothingModel {
- return &LaplaceSmoothingModel{
- alpha: alpha,
- }
-}
-
-func (sm *LaplaceSmoothingModel) Type() string {
- return "laplace"
-}
-
-func (sm *LaplaceSmoothingModel) Source() (interface{}, error) {
- source := make(map[string]interface{})
- source["alpha"] = sm.alpha
- return source, nil
-}
-
-// --
-
-// LinearInterpolationSmoothingModel implements a linear interpolation
-// smoothing model.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html#_smoothing_models
-// for details about smoothing models.
-type LinearInterpolationSmoothingModel struct {
- trigramLamda float64
- bigramLambda float64
- unigramLambda float64
-}
-
-func NewLinearInterpolationSmoothingModel(trigramLamda, bigramLambda, unigramLambda float64) *LinearInterpolationSmoothingModel {
- return &LinearInterpolationSmoothingModel{
- trigramLamda: trigramLamda,
- bigramLambda: bigramLambda,
- unigramLambda: unigramLambda,
- }
-}
-
-func (sm *LinearInterpolationSmoothingModel) Type() string {
- return "linear_interpolation"
-}
-
-func (sm *LinearInterpolationSmoothingModel) Source() (interface{}, error) {
- source := make(map[string]interface{})
- source["trigram_lambda"] = sm.trigramLamda
- source["bigram_lambda"] = sm.bigramLambda
- source["unigram_lambda"] = sm.unigramLambda
- return source, nil
-}
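A short sketch attaching one of the smoothing models above to a phrase suggester (field name is illustrative):

	s := NewPhraseSuggester("simple_phrase").
		Field("title.trigram").
		GramSize(3).
		SmoothingModel(NewLaplaceSmoothingModel(0.7))
	// serialized under "smoothing":{"laplace":{"alpha":0.7}}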
-
-// -- CandidateGenerator --
-
-type CandidateGenerator interface {
- Type() string
- Source() (interface{}, error)
-}
-
-// DirectCandidateGenerator implements a direct candidate generator.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html
-// for details about candidate generators.
-type DirectCandidateGenerator struct {
- field string
- preFilter *string
- postFilter *string
- suggestMode *string
- accuracy *float64
- size *int
- sort *string
- stringDistance *string
- maxEdits *int
- maxInspections *int
- maxTermFreq *float64
- prefixLength *int
- minWordLength *int
- minDocFreq *float64
-}
-
-func NewDirectCandidateGenerator(field string) *DirectCandidateGenerator {
- return &DirectCandidateGenerator{
- field: field,
- }
-}
-
-func (g *DirectCandidateGenerator) Type() string {
- return "direct_generator"
-}
-
-func (g *DirectCandidateGenerator) Field(field string) *DirectCandidateGenerator {
- g.field = field
- return g
-}
-
-func (g *DirectCandidateGenerator) PreFilter(preFilter string) *DirectCandidateGenerator {
- g.preFilter = &preFilter
- return g
-}
-
-func (g *DirectCandidateGenerator) PostFilter(postFilter string) *DirectCandidateGenerator {
- g.postFilter = &postFilter
- return g
-}
-
-func (g *DirectCandidateGenerator) SuggestMode(suggestMode string) *DirectCandidateGenerator {
- g.suggestMode = &suggestMode
- return g
-}
-
-func (g *DirectCandidateGenerator) Accuracy(accuracy float64) *DirectCandidateGenerator {
- g.accuracy = &accuracy
- return g
-}
-
-func (g *DirectCandidateGenerator) Size(size int) *DirectCandidateGenerator {
- g.size = &size
- return g
-}
-
-func (g *DirectCandidateGenerator) Sort(sort string) *DirectCandidateGenerator {
- g.sort = &sort
- return g
-}
-
-func (g *DirectCandidateGenerator) StringDistance(stringDistance string) *DirectCandidateGenerator {
- g.stringDistance = &stringDistance
- return g
-}
-
-func (g *DirectCandidateGenerator) MaxEdits(maxEdits int) *DirectCandidateGenerator {
- g.maxEdits = &maxEdits
- return g
-}
-
-func (g *DirectCandidateGenerator) MaxInspections(maxInspections int) *DirectCandidateGenerator {
- g.maxInspections = &maxInspections
- return g
-}
-
-func (g *DirectCandidateGenerator) MaxTermFreq(maxTermFreq float64) *DirectCandidateGenerator {
- g.maxTermFreq = &maxTermFreq
- return g
-}
-
-func (g *DirectCandidateGenerator) PrefixLength(prefixLength int) *DirectCandidateGenerator {
- g.prefixLength = &prefixLength
- return g
-}
-
-func (g *DirectCandidateGenerator) MinWordLength(minWordLength int) *DirectCandidateGenerator {
- g.minWordLength = &minWordLength
- return g
-}
-
-func (g *DirectCandidateGenerator) MinDocFreq(minDocFreq float64) *DirectCandidateGenerator {
- g.minDocFreq = &minDocFreq
- return g
-}
-
-func (g *DirectCandidateGenerator) Source() (interface{}, error) {
- source := make(map[string]interface{})
- if g.field != "" {
- source["field"] = g.field
- }
- if g.suggestMode != nil {
- source["suggest_mode"] = *g.suggestMode
- }
- if g.accuracy != nil {
- source["accuracy"] = *g.accuracy
- }
- if g.size != nil {
- source["size"] = *g.size
- }
- if g.sort != nil {
- source["sort"] = *g.sort
- }
- if g.stringDistance != nil {
- source["string_distance"] = *g.stringDistance
- }
- if g.maxEdits != nil {
- source["max_edits"] = *g.maxEdits
- }
- if g.maxInspections != nil {
- source["max_inspections"] = *g.maxInspections
- }
- if g.maxTermFreq != nil {
- source["max_term_freq"] = *g.maxTermFreq
- }
- if g.prefixLength != nil {
- source["prefix_length"] = *g.prefixLength
- }
- if g.minWordLength != nil {
- source["min_word_length"] = *g.minWordLength
- }
- if g.minDocFreq != nil {
- source["min_doc_freq"] = *g.minDocFreq
- }
- if g.preFilter != nil {
- source["pre_filter"] = *g.preFilter
- }
- if g.postFilter != nil {
- source["post_filter"] = *g.postFilter
- }
- return source, nil
-}
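
Illustrative sketch, not part of the diff above: a direct candidate generator is assembled with the fluent setters and serialized via Source(), which yields the map a phrase suggester embeds under "direct_generator". Assumes encoding/json and github.com/olivere/elastic (imported as elastic).

// Build a direct candidate generator and inspect the JSON it serializes to.
func directGeneratorJSON() (string, error) {
	gen := elastic.NewDirectCandidateGenerator("body").
		SuggestMode("always").
		MinWordLength(1).
		MaxEdits(2)
	src, err := gen.Source() // only the fields that were set end up in the map
	if err != nil {
		return "", err
	}
	data, err := json.Marshal(src)
	if err != nil {
		return "", err
	}
	// {"field":"body","max_edits":2,"min_word_length":1,"suggest_mode":"always"}
	return string(data), nil
}
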
diff --git a/vendor/github.com/olivere/elastic/suggester_phrase_test.go b/vendor/github.com/olivere/elastic/suggester_phrase_test.go
deleted file mode 100644
index 63dde686e..000000000
--- a/vendor/github.com/olivere/elastic/suggester_phrase_test.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestPhraseSuggesterSource(t *testing.T) {
- s := NewPhraseSuggester("name").
- Text("Xor the Got-Jewel").
- Analyzer("body").
- Field("bigram").
- Size(1).
- RealWordErrorLikelihood(0.95).
- MaxErrors(0.5).
- GramSize(2).
- Highlight("<em>", "</em>")
- src, err := s.Source(true)
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPhraseSuggesterSourceWithContextQuery(t *testing.T) {
- geomapQ := NewSuggesterGeoMapping("location").
- Precision("1km", "5m").
- Neighbors(true).
- FieldName("pin").
- DefaultLocations(GeoPointFromLatLon(0.0, 0.0))
-
- s := NewPhraseSuggester("name").
- Text("Xor the Got-Jewel").
- Analyzer("body").
- Field("bigram").
- Size(1).
- RealWordErrorLikelihood(0.95).
- MaxErrors(0.5).
- GramSize(2).
- Highlight("<em>", "</em>").
- ContextQuery(geomapQ)
- src, err := s.Source(true)
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","contexts":{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}},"field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPhraseSuggesterComplexSource(t *testing.T) {
- g1 := NewDirectCandidateGenerator("body").
- SuggestMode("always").
- MinWordLength(1)
-
- g2 := NewDirectCandidateGenerator("reverse").
- SuggestMode("always").
- MinWordLength(1).
- PreFilter("reverse").
- PostFilter("reverse")
-
- s := NewPhraseSuggester("simple_phrase").
- Text("Xor the Got-Jewel").
- Analyzer("body").
- Field("bigram").
- Size(4).
- RealWordErrorLikelihood(0.95).
- Confidence(2.0).
- GramSize(2).
- CandidateGenerators(g1, g2).
- CollateQuery(`"match":{"{{field_name}}" : "{{suggestion}}"}`).
- CollateParams(map[string]interface{}{"field_name": "title"}).
- CollatePreference("_primary").
- CollatePrune(true)
- src, err := s.Source(true)
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"simple_phrase":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","collate":{"params":{"field_name":"title"},"preference":"_primary","prune":true,"query":"\"match\":{\"{{field_name}}\" : \"{{suggestion}}\"}"},"confidence":2,"direct_generator":[{"field":"body","min_word_length":1,"suggest_mode":"always"},{"field":"reverse","min_word_length":1,"post_filter":"reverse","pre_filter":"reverse","suggest_mode":"always"}],"field":"bigram","gram_size":2,"real_word_error_likelihood":0.95,"size":4}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestPhraseStupidBackoffSmoothingModel(t *testing.T) {
- s := NewStupidBackoffSmoothingModel(0.42)
- src, err := s.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- // The source does NOT include the smoothing model type!
- expected := `{"discount":0.42}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
- if s.Type() != "stupid_backoff" {
- t.Errorf("expected %q, got: %q", "stupid_backoff", s.Type())
- }
-}
-
-func TestPhraseLaplaceSmoothingModel(t *testing.T) {
- s := NewLaplaceSmoothingModel(0.63)
- src, err := s.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- // The source does NOT include the smoothing model type!
- expected := `{"alpha":0.63}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
- if s.Type() != "laplace" {
- t.Errorf("expected %q, got: %q", "laplace", s.Type())
- }
-}
-
-func TestLinearInterpolationSmoothingModel(t *testing.T) {
- s := NewLinearInterpolationSmoothingModel(0.3, 0.2, 0.05)
- src, err := s.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- // The source does NOT include the smoothing model type!
- expected := `{"bigram_lambda":0.2,"trigram_lambda":0.3,"unigram_lambda":0.05}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
- if s.Type() != "linear_interpolation" {
- t.Errorf("expected %q, got: %q", "linear_interpolation", s.Type())
- }
-}
diff --git a/vendor/github.com/olivere/elastic/suggester_term.go b/vendor/github.com/olivere/elastic/suggester_term.go
deleted file mode 100644
index 69e1531f6..000000000
--- a/vendor/github.com/olivere/elastic/suggester_term.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// TermSuggester suggests terms based on edit distance.
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-term.html.
-type TermSuggester struct {
- Suggester
- name string
- text string
- field string
- analyzer string
- size *int
- shardSize *int
- contextQueries []SuggesterContextQuery
-
- // fields specific to term suggester
- suggestMode string
- accuracy *float64
- sort string
- stringDistance string
- maxEdits *int
- maxInspections *int
- maxTermFreq *float64
- prefixLength *int
- minWordLength *int
- minDocFreq *float64
-}
-
-// NewTermSuggester creates a new TermSuggester.
-func NewTermSuggester(name string) *TermSuggester {
- return &TermSuggester{
- name: name,
- }
-}
-
-func (q *TermSuggester) Name() string {
- return q.name
-}
-
-func (q *TermSuggester) Text(text string) *TermSuggester {
- q.text = text
- return q
-}
-
-func (q *TermSuggester) Field(field string) *TermSuggester {
- q.field = field
- return q
-}
-
-func (q *TermSuggester) Analyzer(analyzer string) *TermSuggester {
- q.analyzer = analyzer
- return q
-}
-
-func (q *TermSuggester) Size(size int) *TermSuggester {
- q.size = &size
- return q
-}
-
-func (q *TermSuggester) ShardSize(shardSize int) *TermSuggester {
- q.shardSize = &shardSize
- return q
-}
-
-func (q *TermSuggester) ContextQuery(query SuggesterContextQuery) *TermSuggester {
- q.contextQueries = append(q.contextQueries, query)
- return q
-}
-
-func (q *TermSuggester) ContextQueries(queries ...SuggesterContextQuery) *TermSuggester {
- q.contextQueries = append(q.contextQueries, queries...)
- return q
-}
-
-func (q *TermSuggester) SuggestMode(suggestMode string) *TermSuggester {
- q.suggestMode = suggestMode
- return q
-}
-
-func (q *TermSuggester) Accuracy(accuracy float64) *TermSuggester {
- q.accuracy = &accuracy
- return q
-}
-
-func (q *TermSuggester) Sort(sort string) *TermSuggester {
- q.sort = sort
- return q
-}
-
-func (q *TermSuggester) StringDistance(stringDistance string) *TermSuggester {
- q.stringDistance = stringDistance
- return q
-}
-
-func (q *TermSuggester) MaxEdits(maxEdits int) *TermSuggester {
- q.maxEdits = &maxEdits
- return q
-}
-
-func (q *TermSuggester) MaxInspections(maxInspections int) *TermSuggester {
- q.maxInspections = &maxInspections
- return q
-}
-
-func (q *TermSuggester) MaxTermFreq(maxTermFreq float64) *TermSuggester {
- q.maxTermFreq = &maxTermFreq
- return q
-}
-
-func (q *TermSuggester) PrefixLength(prefixLength int) *TermSuggester {
- q.prefixLength = &prefixLength
- return q
-}
-
-func (q *TermSuggester) MinWordLength(minWordLength int) *TermSuggester {
- q.minWordLength = &minWordLength
- return q
-}
-
-func (q *TermSuggester) MinDocFreq(minDocFreq float64) *TermSuggester {
- q.minDocFreq = &minDocFreq
- return q
-}
-
-// termSuggesterRequest is necessary because the order in which
-// the JSON elements are routed to Elasticsearch is relevant.
-// We got into trouble when using plain maps because the text element
-// needs to go before the term element.
-type termSuggesterRequest struct {
- Text string `json:"text"`
- Term interface{} `json:"term"`
-}
-
-// Source generates the source for the term suggester.
-func (q *TermSuggester) Source(includeName bool) (interface{}, error) {
- // "suggest" : {
- // "my-suggest-1" : {
- // "text" : "the amsterdma meetpu",
- // "term" : {
- // "field" : "body"
- // }
- // },
- // "my-suggest-2" : {
- // "text" : "the rottredam meetpu",
- // "term" : {
- // "field" : "title",
- // }
- // }
- // }
- ts := &termSuggesterRequest{}
- if q.text != "" {
- ts.Text = q.text
- }
-
- suggester := make(map[string]interface{})
- ts.Term = suggester
-
- if q.analyzer != "" {
- suggester["analyzer"] = q.analyzer
- }
- if q.field != "" {
- suggester["field"] = q.field
- }
- if q.size != nil {
- suggester["size"] = *q.size
- }
- if q.shardSize != nil {
- suggester["shard_size"] = *q.shardSize
- }
- switch len(q.contextQueries) {
- case 0:
- case 1:
- src, err := q.contextQueries[0].Source()
- if err != nil {
- return nil, err
- }
- suggester["contexts"] = src
- default:
- ctxq := make([]interface{}, len(q.contextQueries))
- for i, query := range q.contextQueries {
- src, err := query.Source()
- if err != nil {
- return nil, err
- }
- ctxq[i] = src
- }
- suggester["contexts"] = ctxq
- }
-
- // Specific to term suggester
- if q.suggestMode != "" {
- suggester["suggest_mode"] = q.suggestMode
- }
- if q.accuracy != nil {
- suggester["accuracy"] = *q.accuracy
- }
- if q.sort != "" {
- suggester["sort"] = q.sort
- }
- if q.stringDistance != "" {
- suggester["string_distance"] = q.stringDistance
- }
- if q.maxEdits != nil {
- suggester["max_edits"] = *q.maxEdits
- }
- if q.maxInspections != nil {
- suggester["max_inspections"] = *q.maxInspections
- }
- if q.maxTermFreq != nil {
- suggester["max_term_freq"] = *q.maxTermFreq
- }
- if q.prefixLength != nil {
- suggester["prefix_length"] = *q.prefixLength
- }
- if q.minWordLength != nil {
- suggester["min_word_len"] = *q.minWordLength
- }
- if q.minDocFreq != nil {
- suggester["min_doc_freq"] = *q.minDocFreq
- }
-
- if !includeName {
- return ts, nil
- }
-
- source := make(map[string]interface{})
- source[q.name] = ts
- return source, nil
-}
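
Illustrative sketch, not part of the diff above: serializing a term suggester; the termSuggesterRequest struct above guarantees that "text" is emitted before "term". Assumes encoding/json and github.com/olivere/elastic (imported as elastic).

// Serialize a term suggester the way a search request would embed it.
func termSuggesterJSON() (string, error) {
	s := elastic.NewTermSuggester("my-suggest-1").
		Text("the amsterdma meetpu").
		Field("body").
		SuggestMode("missing")
	src, err := s.Source(true) // true wraps the body under the suggester name
	if err != nil {
		return "", err
	}
	data, err := json.Marshal(src)
	if err != nil {
		return "", err
	}
	// {"my-suggest-1":{"text":"the amsterdma meetpu","term":{"field":"body","suggest_mode":"missing"}}}
	return string(data), nil
}
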
diff --git a/vendor/github.com/olivere/elastic/suggester_term_test.go b/vendor/github.com/olivere/elastic/suggester_term_test.go
deleted file mode 100644
index d3250f69a..000000000
--- a/vendor/github.com/olivere/elastic/suggester_term_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestTermSuggesterSource(t *testing.T) {
- s := NewTermSuggester("name").
- Text("n").
- Field("suggest")
- src, err := s.Source(true)
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"name":{"text":"n","term":{"field":"suggest"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermSuggesterWithPrefixLengthSource(t *testing.T) {
- s := NewTermSuggester("name").
- Text("n").
- Field("suggest").
- PrefixLength(0)
- src, err := s.Source(true)
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"name":{"text":"n","term":{"field":"suggest","prefix_length":0}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/tasks_cancel.go b/vendor/github.com/olivere/elastic/tasks_cancel.go
deleted file mode 100644
index 84f8aec35..000000000
--- a/vendor/github.com/olivere/elastic/tasks_cancel.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// TasksCancelService can cancel long-running tasks.
-// It is supported as of Elasticsearch 2.3.0.
-//
-// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/tasks-cancel.html
-// for details.
-type TasksCancelService struct {
- client *Client
- pretty bool
- taskId *int64
- actions []string
- nodeId []string
- parentNode string
- parentTask *int64
-}
-
-// NewTasksCancelService creates a new TasksCancelService.
-func NewTasksCancelService(client *Client) *TasksCancelService {
- return &TasksCancelService{
- client: client,
- actions: make([]string, 0),
- nodeId: make([]string, 0),
- }
-}
-
-// TaskId specifies the task to cancel. Set to -1 to cancel all tasks.
-func (s *TasksCancelService) TaskId(taskId int64) *TasksCancelService {
- s.taskId = &taskId
- return s
-}
-
-// Actions is a list of actions that should be cancelled. Leave empty to cancel all.
-func (s *TasksCancelService) Actions(actions []string) *TasksCancelService {
- s.actions = actions
- return s
-}
-
-// NodeId is a list of node IDs or names to limit the returned information;
-// use `_local` to return information from the node you're connecting to,
-// leave empty to get information from all nodes.
-func (s *TasksCancelService) NodeId(nodeId []string) *TasksCancelService {
- s.nodeId = nodeId
- return s
-}
-
-// ParentNode limits cancellation to tasks with the specified parent node.
-func (s *TasksCancelService) ParentNode(parentNode string) *TasksCancelService {
- s.parentNode = parentNode
- return s
-}
-
-// ParentTask limits cancellation to tasks with the specified parent task id.
-// Set to -1 to cancel all.
-func (s *TasksCancelService) ParentTask(parentTask int64) *TasksCancelService {
- s.parentTask = &parentTask
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *TasksCancelService) Pretty(pretty bool) *TasksCancelService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *TasksCancelService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
- if s.taskId != nil {
- path, err = uritemplates.Expand("/_tasks/{task_id}/_cancel", map[string]string{
- "task_id": fmt.Sprintf("%d", *s.taskId),
- })
- } else {
- path = "/_tasks/_cancel"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if len(s.actions) > 0 {
- params.Set("actions", strings.Join(s.actions, ","))
- }
- if len(s.nodeId) > 0 {
- params.Set("node_id", strings.Join(s.nodeId, ","))
- }
- if s.parentNode != "" {
- params.Set("parent_node", s.parentNode)
- }
- if s.parentTask != nil {
- params.Set("parent_task", fmt.Sprintf("%v", *s.parentTask))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *TasksCancelService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *TasksCancelService) Do(ctx context.Context) (*TasksListResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(TasksListResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
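
Illustrative sketch, not part of the diff above: cancelling a single task through the client helper that the test below exercises. Assumes context, fmt, and an initialized *elastic.Client.

// Cancel one long-running task by its numeric id; TaskId(-1) would target all cancellable tasks.
func cancelTask(ctx context.Context, client *elastic.Client, taskID int64) error {
	res, err := client.TasksCancel().
		TaskId(taskID).
		Do(ctx)
	if err != nil {
		return err
	}
	// The response reuses TasksListResponse; per-task and per-node failures are reported separately.
	if len(res.TaskFailures) > 0 || len(res.NodeFailures) > 0 {
		return fmt.Errorf("cancellation reported %d task and %d node failures",
			len(res.TaskFailures), len(res.NodeFailures))
	}
	return nil
}
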
diff --git a/vendor/github.com/olivere/elastic/tasks_cancel_test.go b/vendor/github.com/olivere/elastic/tasks_cancel_test.go
deleted file mode 100644
index c9d863394..000000000
--- a/vendor/github.com/olivere/elastic/tasks_cancel_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-func TestTasksCancelBuildURL(t *testing.T) {
- client := setupTestClient(t)
-
- // Cancel all
- got, _, err := client.TasksCancel().buildURL()
- if err != nil {
- t.Fatal(err)
- }
- want := "/_tasks/_cancel"
- if got != want {
- t.Errorf("want %q; got %q", want, got)
- }
-
- // Cancel specific task
- got, _, err = client.TasksCancel().TaskId(42).buildURL()
- if err != nil {
- t.Fatal(err)
- }
- want = "/_tasks/42/_cancel"
- if got != want {
- t.Errorf("want %q; got %q", want, got)
- }
-}
-
-/*
-func TestTasksCancel(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t)
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
- if esversion < "2.3.0" {
- t.Skipf("Elasticsearch %v does not support Tasks Management API yet", esversion)
- }
- res, err := client.TasksCancel("1").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("response is nil")
- }
-}
-*/
diff --git a/vendor/github.com/olivere/elastic/tasks_get_task.go b/vendor/github.com/olivere/elastic/tasks_get_task.go
deleted file mode 100644
index 5f63726e4..000000000
--- a/vendor/github.com/olivere/elastic/tasks_get_task.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// TasksGetTaskService retrieves the state of a task in the cluster. It is part of the Task Management API
-// documented at http://www.elastic.co/guide/en/elasticsearch/reference/5.2/tasks-list.html.
-//
-// It is supported as of Elasticsearch 2.3.0.
-type TasksGetTaskService struct {
- client *Client
- pretty bool
- taskId string
- waitForCompletion *bool
-}
-
-// NewTasksGetTaskService creates a new TasksGetTaskService.
-func NewTasksGetTaskService(client *Client) *TasksGetTaskService {
- return &TasksGetTaskService{
- client: client,
- }
-}
-
-// TaskId indicates to return the task with the specified id.
-func (s *TasksGetTaskService) TaskId(taskId string) *TasksGetTaskService {
- s.taskId = taskId
- return s
-}
-
-// WaitForCompletion indicates whether to wait for the matching tasks
-// to complete (default: false).
-func (s *TasksGetTaskService) WaitForCompletion(waitForCompletion bool) *TasksGetTaskService {
- s.waitForCompletion = &waitForCompletion
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *TasksGetTaskService) Pretty(pretty bool) *TasksGetTaskService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *TasksGetTaskService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_tasks/{task_id}", map[string]string{
- "task_id": s.taskId,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "1")
- }
- if s.waitForCompletion != nil {
- params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *TasksGetTaskService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *TasksGetTaskService) Do(ctx context.Context) (*TasksGetTaskResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(TasksGetTaskResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type TasksGetTaskResponse struct {
- Completed bool `json:"completed"`
- Task *TaskInfo `json:"task,omitempty"`
-}
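
Illustrative sketch, not part of the diff above: blocking until a task (addressed as "nodeId:taskNumber") finishes. Assumes context and an initialized *elastic.Client.

// Wait for a task to complete and return its final state.
func waitForTask(ctx context.Context, client *elastic.Client, taskID string) (*elastic.TasksGetTaskResponse, error) {
	res, err := client.TasksGetTask().
		TaskId(taskID).
		WaitForCompletion(true).
		Do(ctx)
	if err != nil {
		return nil, err
	}
	// res.Completed reports whether the task finished; res.Task carries its TaskInfo.
	return res, nil
}
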
diff --git a/vendor/github.com/olivere/elastic/tasks_get_task_test.go b/vendor/github.com/olivere/elastic/tasks_get_task_test.go
deleted file mode 100644
index a4da49c74..000000000
--- a/vendor/github.com/olivere/elastic/tasks_get_task_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "testing"
-)
-
-func TestTasksGetTaskBuildURL(t *testing.T) {
- client := setupTestClient(t)
-
- // Get specific task
- got, _, err := client.TasksGetTask().TaskId("123").buildURL()
- if err != nil {
- t.Fatal(err)
- }
- want := "/_tasks/123"
- if got != want {
- t.Errorf("want %q; got %q", want, got)
- }
-}
-
-/*
-func TestTasksGetTask(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t)
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
- if esversion < "2.3.0" {
- t.Skipf("Elasticsearch %v does not support Tasks Management API yet", esversion)
- }
- res, err := client.TasksGetTask().TaskId("123").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("response is nil")
- }
-}
-*/
diff --git a/vendor/github.com/olivere/elastic/tasks_list.go b/vendor/github.com/olivere/elastic/tasks_list.go
deleted file mode 100644
index 54299d961..000000000
--- a/vendor/github.com/olivere/elastic/tasks_list.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// TasksListService retrieves the list of currently executing tasks
-// on one or more nodes in the cluster. It is part of the Task Management API
-// documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/tasks.html.
-//
-// It is supported as of Elasticsearch 2.3.0.
-type TasksListService struct {
- client *Client
- pretty bool
- taskId []string
- actions []string
- detailed *bool
- nodeId []string
- parentNode string
- parentTaskId *string
- waitForCompletion *bool
- groupBy string
-}
-
-// NewTasksListService creates a new TasksListService.
-func NewTasksListService(client *Client) *TasksListService {
- return &TasksListService{
- client: client,
- }
-}
-
-// TaskId indicates to return the task(s) with the specified id(s).
-func (s *TasksListService) TaskId(taskId ...string) *TasksListService {
- s.taskId = append(s.taskId, taskId...)
- return s
-}
-
-// Actions is a list of actions that should be returned. Leave empty to return all.
-func (s *TasksListService) Actions(actions ...string) *TasksListService {
- s.actions = append(s.actions, actions...)
- return s
-}
-
-// Detailed indicates whether to return detailed task information (default: false).
-func (s *TasksListService) Detailed(detailed bool) *TasksListService {
- s.detailed = &detailed
- return s
-}
-
-// NodeId is a list of node IDs or names to limit the returned information;
-// use `_local` to return information from the node you're connecting to,
-// leave empty to get information from all nodes.
-func (s *TasksListService) NodeId(nodeId ...string) *TasksListService {
- s.nodeId = append(s.nodeId, nodeId...)
- return s
-}
-
-// ParentNode returns tasks with the specified parent node.
-func (s *TasksListService) ParentNode(parentNode string) *TasksListService {
- s.parentNode = parentNode
- return s
-}
-
-// ParentTaskId returns tasks with the specified parent task id (node_id:task_number). Set to -1 to return all.
-func (s *TasksListService) ParentTaskId(parentTaskId string) *TasksListService {
- s.parentTaskId = &parentTaskId
- return s
-}
-
-// WaitForCompletion indicates whether to wait for the matching tasks
-// to complete (default: false).
-func (s *TasksListService) WaitForCompletion(waitForCompletion bool) *TasksListService {
- s.waitForCompletion = &waitForCompletion
- return s
-}
-
-// GroupBy groups tasks by nodes or parent/child relationships.
-// As of now, it can either be "nodes" (default) or "parents".
-func (s *TasksListService) GroupBy(groupBy string) *TasksListService {
- s.groupBy = groupBy
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *TasksListService) Pretty(pretty bool) *TasksListService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *TasksListService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
- if len(s.taskId) > 0 {
- path, err = uritemplates.Expand("/_tasks/{task_id}", map[string]string{
- "task_id": strings.Join(s.taskId, ","),
- })
- } else {
- path = "/_tasks"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if len(s.actions) > 0 {
- params.Set("actions", strings.Join(s.actions, ","))
- }
- if s.detailed != nil {
- params.Set("detailed", fmt.Sprintf("%v", *s.detailed))
- }
- if len(s.nodeId) > 0 {
- params.Set("node_id", strings.Join(s.nodeId, ","))
- }
- if s.parentNode != "" {
- params.Set("parent_node", s.parentNode)
- }
- if s.parentTaskId != nil {
- params.Set("parent_task_id", *s.parentTaskId)
- }
- if s.waitForCompletion != nil {
- params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
- }
- if s.groupBy != "" {
- params.Set("group_by", s.groupBy)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *TasksListService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
-func (s *TasksListService) Do(ctx context.Context) (*TasksListResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(TasksListResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// TasksListResponse is the response of TasksListService.Do.
-type TasksListResponse struct {
- TaskFailures []*TaskOperationFailure `json:"task_failures"`
- NodeFailures []*FailedNodeException `json:"node_failures"`
- // Nodes returns the tasks per node. The key is the node id.
- Nodes map[string]*DiscoveryNode `json:"nodes"`
-}
-
-type TaskOperationFailure struct {
- TaskId int64 `json:"task_id"` // this is a long in the Java source
- NodeId string `json:"node_id"`
- Status string `json:"status"`
- Reason *ErrorDetails `json:"reason"`
-}
-
-type FailedNodeException struct {
- *ErrorDetails
- NodeId string `json:"node_id"`
-}
-
-type DiscoveryNode struct {
- Name string `json:"name"`
- TransportAddress string `json:"transport_address"`
- Host string `json:"host"`
- IP string `json:"ip"`
- Roles []string `json:"roles"` // "master", "data", or "ingest"
- Attributes map[string]interface{} `json:"attributes"`
- // Tasks returns the tasks by its id (as a string).
- Tasks map[string]*TaskInfo `json:"tasks"`
-}
-
-// TaskInfo represents information about a currently running task.
-type TaskInfo struct {
- Node string `json:"node"`
- Id int64 `json:"id"` // the task id (yes, this is a long in the Java source)
- Type string `json:"type"`
- Action string `json:"action"`
- Status interface{} `json:"status"` // has separate implementations of Task.Status in Java for reindexing, replication, and "RawTaskStatus"
- Description interface{} `json:"description"` // same as Status
- StartTime string `json:"start_time"`
- StartTimeInMillis int64 `json:"start_time_in_millis"`
- RunningTime string `json:"running_time"`
- RunningTimeInNanos int64 `json:"running_time_in_nanos"`
- Cancellable bool `json:"cancellable"`
- ParentTaskId string `json:"parent_task_id"` // like "YxJnVYjwSBm_AUbzddTajQ:12356"
-}
-
-// StartTaskResult is used in cases where a task gets started asynchronously and
-// the operation simply returns a TaskID to watch for via the Task Management API.
-type StartTaskResult struct {
- TaskId string `json:"task"`
-}
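
Illustrative sketch, not part of the diff above: listing detailed task information from the node the client is connected to. Assumes context, fmt, and an initialized *elastic.Client.

// Print every task currently running on the local node.
func listLocalTasks(ctx context.Context, client *elastic.Client) error {
	res, err := client.TasksList().
		NodeId("_local").
		Detailed(true).
		Do(ctx)
	if err != nil {
		return err
	}
	for nodeID, node := range res.Nodes {
		for taskID, task := range node.Tasks {
			fmt.Printf("node=%s task=%s action=%s running=%s\n",
				nodeID, taskID, task.Action, task.RunningTime)
		}
	}
	return nil
}
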
diff --git a/vendor/github.com/olivere/elastic/tasks_list_test.go b/vendor/github.com/olivere/elastic/tasks_list_test.go
deleted file mode 100644
index 9ecabcd68..000000000
--- a/vendor/github.com/olivere/elastic/tasks_list_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestTasksListBuildURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- TaskId []string
- Expected string
- }{
- {
- []string{},
- "/_tasks",
- },
- {
- []string{"42"},
- "/_tasks/42",
- },
- {
- []string{"42", "37"},
- "/_tasks/42%2C37",
- },
- }
-
- for i, test := range tests {
- path, _, err := client.TasksList().TaskId(test.TaskId...).buildURL()
- if err != nil {
- t.Errorf("case #%d: %v", i+1, err)
- continue
- }
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
-}
-
-func TestTasksList(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
- if esversion < "2.3.0" {
- t.Skipf("Elasticsearch %v does not support Tasks Management API yet", esversion)
- }
-
- res, err := client.TasksList().Pretty(true).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("response is nil")
- }
- if len(res.Nodes) == 0 {
- t.Fatalf("expected at least 1 node; got: %d", len(res.Nodes))
- }
-}
diff --git a/vendor/github.com/olivere/elastic/termvectors.go b/vendor/github.com/olivere/elastic/termvectors.go
deleted file mode 100644
index 5943ad14f..000000000
--- a/vendor/github.com/olivere/elastic/termvectors.go
+++ /dev/null
@@ -1,464 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// TermvectorsService returns information and statistics on terms in the
-// fields of a particular document. The document could be stored in the
-// index or artificially provided by the user.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-termvectors.html
-// for documentation.
-type TermvectorsService struct {
- client *Client
- pretty bool
- id string
- index string
- typ string
- dfs *bool
- doc interface{}
- fieldStatistics *bool
- fields []string
- filter *TermvectorsFilterSettings
- perFieldAnalyzer map[string]string
- offsets *bool
- parent string
- payloads *bool
- positions *bool
- preference string
- realtime *bool
- routing string
- termStatistics *bool
- version interface{}
- versionType string
- bodyJson interface{}
- bodyString string
-}
-
-// NewTermvectorsService creates a new TermvectorsService.
-func NewTermvectorsService(client *Client) *TermvectorsService {
- return &TermvectorsService{
- client: client,
- }
-}
-
-// Index in which the document resides.
-func (s *TermvectorsService) Index(index string) *TermvectorsService {
- s.index = index
- return s
-}
-
-// Type of the document.
-func (s *TermvectorsService) Type(typ string) *TermvectorsService {
- s.typ = typ
- return s
-}
-
-// Id of the document.
-func (s *TermvectorsService) Id(id string) *TermvectorsService {
- s.id = id
- return s
-}
-
-// Dfs specifies if distributed frequencies should be returned instead of
-// shard frequencies.
-func (s *TermvectorsService) Dfs(dfs bool) *TermvectorsService {
- s.dfs = &dfs
- return s
-}
-
-// Doc is the document to analyze.
-func (s *TermvectorsService) Doc(doc interface{}) *TermvectorsService {
- s.doc = doc
- return s
-}
-
-// FieldStatistics specifies if document count, sum of document frequencies
-// and sum of total term frequencies should be returned.
-func (s *TermvectorsService) FieldStatistics(fieldStatistics bool) *TermvectorsService {
- s.fieldStatistics = &fieldStatistics
- return s
-}
-
-// Fields is a list of fields to return.
-func (s *TermvectorsService) Fields(fields ...string) *TermvectorsService {
- if s.fields == nil {
- s.fields = make([]string, 0)
- }
- s.fields = append(s.fields, fields...)
- return s
-}
-
-// Filter adds terms filter settings.
-func (s *TermvectorsService) Filter(filter *TermvectorsFilterSettings) *TermvectorsService {
- s.filter = filter
- return s
-}
-
-// PerFieldAnalyzer allows specifying a different analyzer than the one
-// defined for the field.
-func (s *TermvectorsService) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *TermvectorsService {
- s.perFieldAnalyzer = perFieldAnalyzer
- return s
-}
-
-// Offsets specifies if term offsets should be returned.
-func (s *TermvectorsService) Offsets(offsets bool) *TermvectorsService {
- s.offsets = &offsets
- return s
-}
-
-// Parent sets the parent id of the document.
-func (s *TermvectorsService) Parent(parent string) *TermvectorsService {
- s.parent = parent
- return s
-}
-
-// Payloads specifies if term payloads should be returned.
-func (s *TermvectorsService) Payloads(payloads bool) *TermvectorsService {
- s.payloads = &payloads
- return s
-}
-
-// Positions specifies if term positions should be returned.
-func (s *TermvectorsService) Positions(positions bool) *TermvectorsService {
- s.positions = &positions
- return s
-}
-
-// Preference specifies the node or shard the operation
-// should be performed on (default: random).
-func (s *TermvectorsService) Preference(preference string) *TermvectorsService {
- s.preference = preference
- return s
-}
-
-// Realtime specifies if request is real-time as opposed to
-// near-real-time (default: true).
-func (s *TermvectorsService) Realtime(realtime bool) *TermvectorsService {
- s.realtime = &realtime
- return s
-}
-
-// Routing is a specific routing value.
-func (s *TermvectorsService) Routing(routing string) *TermvectorsService {
- s.routing = routing
- return s
-}
-
-// TermStatistics specifies if total term frequency and document frequency
-// should be returned.
-func (s *TermvectorsService) TermStatistics(termStatistics bool) *TermvectorsService {
- s.termStatistics = &termStatistics
- return s
-}
-
-// Version sets an explicit version number for concurrency control.
-func (s *TermvectorsService) Version(version interface{}) *TermvectorsService {
- s.version = version
- return s
-}
-
-// VersionType specifies a version type ("internal", "external", or "external_gte").
-func (s *TermvectorsService) VersionType(versionType string) *TermvectorsService {
- s.versionType = versionType
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *TermvectorsService) Pretty(pretty bool) *TermvectorsService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson defines the body parameters. See documentation.
-func (s *TermvectorsService) BodyJson(body interface{}) *TermvectorsService {
- s.bodyJson = body
- return s
-}
-
-// BodyString defines the body parameters as a string. See documentation.
-func (s *TermvectorsService) BodyString(body string) *TermvectorsService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *TermvectorsService) buildURL() (string, url.Values, error) {
- var pathParam = map[string]string{
- "index": s.index,
- "type": s.typ,
- }
- var path string
- var err error
-
- // Build URL
- if s.id != "" {
- pathParam["id"] = s.id
- path, err = uritemplates.Expand("/{index}/{type}/{id}/_termvectors", pathParam)
- } else {
- path, err = uritemplates.Expand("/{index}/{type}/_termvectors", pathParam)
- }
-
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if s.dfs != nil {
- params.Set("dfs", fmt.Sprintf("%v", *s.dfs))
- }
- if s.fieldStatistics != nil {
- params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics))
- }
- if len(s.fields) > 0 {
- params.Set("fields", strings.Join(s.fields, ","))
- }
- if s.offsets != nil {
- params.Set("offsets", fmt.Sprintf("%v", *s.offsets))
- }
- if s.parent != "" {
- params.Set("parent", s.parent)
- }
- if s.payloads != nil {
- params.Set("payloads", fmt.Sprintf("%v", *s.payloads))
- }
- if s.positions != nil {
- params.Set("positions", fmt.Sprintf("%v", *s.positions))
- }
- if s.preference != "" {
- params.Set("preference", s.preference)
- }
- if s.realtime != nil {
- params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
- }
- if s.routing != "" {
- params.Set("routing", s.routing)
- }
- if s.termStatistics != nil {
- params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics))
- }
- if s.version != nil {
- params.Set("version", fmt.Sprintf("%v", s.version))
- }
- if s.versionType != "" {
- params.Set("version_type", s.versionType)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *TermvectorsService) Validate() error {
- var invalid []string
- if s.index == "" {
- invalid = append(invalid, "Index")
- }
- if s.typ == "" {
- invalid = append(invalid, "Type")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *TermvectorsService) Do(ctx context.Context) (*TermvectorsResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else if s.bodyString != "" {
- body = s.bodyString
- } else {
- data := make(map[string]interface{})
- if s.doc != nil {
- data["doc"] = s.doc
- }
- if len(s.perFieldAnalyzer) > 0 {
- data["per_field_analyzer"] = s.perFieldAnalyzer
- }
- if s.filter != nil {
- src, err := s.filter.Source()
- if err != nil {
- return nil, err
- }
- data["filter"] = src
- }
- if len(data) > 0 {
- body = data
- }
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "GET",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(TermvectorsResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// -- Filter settings --
-
-// TermvectorsFilterSettings adds additional filters to a Termvectors request.
-// It allows filtering terms based on their tf-idf scores.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-termvectors.html#_terms_filtering
-// for more information.
-type TermvectorsFilterSettings struct {
- maxNumTerms *int64
- minTermFreq *int64
- maxTermFreq *int64
- minDocFreq *int64
- maxDocFreq *int64
- minWordLength *int64
- maxWordLength *int64
-}
-
-// NewTermvectorsFilterSettings creates and initializes a new TermvectorsFilterSettings struct.
-func NewTermvectorsFilterSettings() *TermvectorsFilterSettings {
- return &TermvectorsFilterSettings{}
-}
-
-// MaxNumTerms specifies the maximum number of terms that must be returned per field.
-func (fs *TermvectorsFilterSettings) MaxNumTerms(value int64) *TermvectorsFilterSettings {
- fs.maxNumTerms = &value
- return fs
-}
-
-// MinTermFreq ignores words with less than this frequency in the source doc.
-func (fs *TermvectorsFilterSettings) MinTermFreq(value int64) *TermvectorsFilterSettings {
- fs.minTermFreq = &value
- return fs
-}
-
-// MaxTermFreq ignores words with more than this frequency in the source doc.
-func (fs *TermvectorsFilterSettings) MaxTermFreq(value int64) *TermvectorsFilterSettings {
- fs.maxTermFreq = &value
- return fs
-}
-
-// MinDocFreq ignores terms which do not occur in at least this many docs.
-func (fs *TermvectorsFilterSettings) MinDocFreq(value int64) *TermvectorsFilterSettings {
- fs.minDocFreq = &value
- return fs
-}
-
-// MaxDocFreq ignores terms which occur in more than this many docs.
-func (fs *TermvectorsFilterSettings) MaxDocFreq(value int64) *TermvectorsFilterSettings {
- fs.maxDocFreq = &value
- return fs
-}
-
-// MinWordLength specifies the minimum word length below which words will be ignored.
-func (fs *TermvectorsFilterSettings) MinWordLength(value int64) *TermvectorsFilterSettings {
- fs.minWordLength = &value
- return fs
-}
-
-// MaxWordLength specifies the maximum word length above which words will be ignored.
-func (fs *TermvectorsFilterSettings) MaxWordLength(value int64) *TermvectorsFilterSettings {
- fs.maxWordLength = &value
- return fs
-}
-
-// Source returns JSON for the query.
-func (fs *TermvectorsFilterSettings) Source() (interface{}, error) {
- source := make(map[string]interface{})
- if fs.maxNumTerms != nil {
- source["max_num_terms"] = *fs.maxNumTerms
- }
- if fs.minTermFreq != nil {
- source["min_term_freq"] = *fs.minTermFreq
- }
- if fs.maxTermFreq != nil {
- source["max_term_freq"] = *fs.maxTermFreq
- }
- if fs.minDocFreq != nil {
- source["min_doc_freq"] = *fs.minDocFreq
- }
- if fs.maxDocFreq != nil {
- source["max_doc_freq"] = *fs.maxDocFreq
- }
- if fs.minWordLength != nil {
- source["min_word_length"] = *fs.minWordLength
- }
- if fs.maxWordLength != nil {
- source["max_word_length"] = *fs.maxWordLength
- }
- return source, nil
-}
-
-// -- Response types --
-
-type TokenInfo struct {
- StartOffset int64 `json:"start_offset"`
- EndOffset int64 `json:"end_offset"`
- Position int64 `json:"position"`
- Payload string `json:"payload"`
-}
-
-type TermsInfo struct {
- DocFreq int64 `json:"doc_freq"`
- Score float64 `json:"score"`
- TermFreq int64 `json:"term_freq"`
- Ttf int64 `json:"ttf"`
- Tokens []TokenInfo `json:"tokens"`
-}
-
-type FieldStatistics struct {
- DocCount int64 `json:"doc_count"`
- SumDocFreq int64 `json:"sum_doc_freq"`
- SumTtf int64 `json:"sum_ttf"`
-}
-
-type TermVectorsFieldInfo struct {
- FieldStatistics FieldStatistics `json:"field_statistics"`
- Terms map[string]TermsInfo `json:"terms"`
-}
-
-// TermvectorsResponse is the response of TermvectorsService.Do.
-type TermvectorsResponse struct {
- Index string `json:"_index"`
- Type string `json:"_type"`
- Id string `json:"_id,omitempty"`
- Version int `json:"_version"`
- Found bool `json:"found"`
- Took int64 `json:"took"`
- TermVectors map[string]TermVectorsFieldInfo `json:"term_vectors"`
-}
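
Illustrative sketch, not part of the diff above: requesting term vectors for a stored document, filtered via TermvectorsFilterSettings. The index, type, and field name ("message") are placeholders. Assumes context, fmt, and an initialized *elastic.Client.

// Fetch term vectors for one document and summarize them per field.
func termVectorsFor(ctx context.Context, client *elastic.Client, index, typ, id string) error {
	res, err := client.TermVectors(index, typ).
		Id(id).
		Fields("message").
		FieldStatistics(true).
		TermStatistics(true).
		Filter(elastic.NewTermvectorsFilterSettings().MinTermFreq(1)).
		Do(ctx)
	if err != nil {
		return err
	}
	for field, info := range res.TermVectors {
		fmt.Printf("field %q: %d distinct terms (doc_count=%d)\n",
			field, len(info.Terms), info.FieldStatistics.DocCount)
	}
	return nil
}
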
diff --git a/vendor/github.com/olivere/elastic/termvectors_test.go b/vendor/github.com/olivere/elastic/termvectors_test.go
deleted file mode 100644
index 0391f2b0a..000000000
--- a/vendor/github.com/olivere/elastic/termvectors_test.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
- "time"
-)
-
-func TestTermVectorsBuildURL(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tests := []struct {
- Index string
- Type string
- Id string
- Expected string
- }{
- {
- "twitter",
- "doc",
- "",
- "/twitter/doc/_termvectors",
- },
- {
- "twitter",
- "doc",
- "1",
- "/twitter/doc/1/_termvectors",
- },
- }
-
- for _, test := range tests {
- builder := client.TermVectors(test.Index, test.Type)
- if test.Id != "" {
- builder = builder.Id(test.Id)
- }
- path, _, err := builder.buildURL()
- if err != nil {
- t.Fatal(err)
- }
- if path != test.Expected {
- t.Errorf("expected %q; got: %q", test.Expected, path)
- }
- }
-}
-
-func TestTermVectorsWithId(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
-
- // Add a document
- indexResult, err := client.Index().
- Index(testIndexName).
- Type("doc").
- Id("1").
- BodyJson(&tweet1).
- Refresh("true").
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if indexResult == nil {
- t.Errorf("expected result to be != nil; got: %v", indexResult)
- }
-
- // TermVectors by specifying ID
- field := "Message"
- result, err := client.TermVectors(testIndexName, "doc").
- Id("1").
- Fields(field).
- FieldStatistics(true).
- TermStatistics(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if result == nil {
- t.Fatal("expected to return information and statistics")
- }
- if !result.Found {
- t.Errorf("expected found to be %v; got: %v", true, result.Found)
- }
-}
-
-func TestTermVectorsWithDoc(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- // Travis lags sometimes
- if isTravis() {
- time.Sleep(2 * time.Second)
- }
-
- // TermVectors by specifying Doc
- var doc = map[string]interface{}{
- "fullname": "John Doe",
- "text": "twitter test test test",
- }
- var perFieldAnalyzer = map[string]string{
- "fullname": "keyword",
- }
-
- result, err := client.TermVectors(testIndexName, "doc").
- Doc(doc).
- PerFieldAnalyzer(perFieldAnalyzer).
- FieldStatistics(true).
- TermStatistics(true).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if result == nil {
- t.Fatal("expected to return information and statistics")
- }
- if !result.Found {
- t.Errorf("expected found to be %v; got: %v", true, result.Found)
- }
-}
-
-func TestTermVectorsWithFilter(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- // Travis lags sometimes
- if isTravis() {
- time.Sleep(2 * time.Second)
- }
-
- // TermVectors by specifying Doc
- var doc = map[string]interface{}{
- "fullname": "John Doe",
- "text": "twitter test test test",
- }
- var perFieldAnalyzer = map[string]string{
- "fullname": "keyword",
- }
-
- result, err := client.TermVectors(testIndexName, "doc").
- Doc(doc).
- PerFieldAnalyzer(perFieldAnalyzer).
- FieldStatistics(true).
- TermStatistics(true).
- Filter(NewTermvectorsFilterSettings().MinTermFreq(1)).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if result == nil {
- t.Fatal("expected to return information and statistics")
- }
- if !result.Found {
- t.Errorf("expected found to be %v; got: %v", true, result.Found)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/update.go b/vendor/github.com/olivere/elastic/update.go
deleted file mode 100644
index 5507fae4c..000000000
--- a/vendor/github.com/olivere/elastic/update.go
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// UpdateService updates a document in Elasticsearch.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-update.html
-// for details.
-type UpdateService struct {
- client *Client
- index string
- typ string
- id string
- routing string
- parent string
- script *Script
- fields []string
- fsc *FetchSourceContext
- version *int64
- versionType string
- retryOnConflict *int
- refresh string
- waitForActiveShards string
- upsert interface{}
- scriptedUpsert *bool
- docAsUpsert *bool
- detectNoop *bool
- doc interface{}
- timeout string
- pretty bool
-}
-
-// NewUpdateService creates the service to update documents in Elasticsearch.
-func NewUpdateService(client *Client) *UpdateService {
- builder := &UpdateService{
- client: client,
- fields: make([]string, 0),
- }
- return builder
-}
-
-// Index is the name of the Elasticsearch index (required).
-func (b *UpdateService) Index(name string) *UpdateService {
- b.index = name
- return b
-}
-
-// Type is the type of the document (required).
-func (b *UpdateService) Type(typ string) *UpdateService {
- b.typ = typ
- return b
-}
-
-// Id is the identifier of the document to update (required).
-func (b *UpdateService) Id(id string) *UpdateService {
- b.id = id
- return b
-}
-
-// Routing specifies a specific routing value.
-func (b *UpdateService) Routing(routing string) *UpdateService {
- b.routing = routing
- return b
-}
-
-// Parent sets the id of the parent document.
-func (b *UpdateService) Parent(parent string) *UpdateService {
- b.parent = parent
- return b
-}
-
-// Script is the script definition.
-func (b *UpdateService) Script(script *Script) *UpdateService {
- b.script = script
- return b
-}
-
-// RetryOnConflict specifies how many times the operation should be retried
-// when a conflict occurs (default: 0).
-func (b *UpdateService) RetryOnConflict(retryOnConflict int) *UpdateService {
- b.retryOnConflict = &retryOnConflict
- return b
-}
-
-// Fields is a list of fields to return in the response.
-func (b *UpdateService) Fields(fields ...string) *UpdateService {
- b.fields = make([]string, 0, len(fields))
- b.fields = append(b.fields, fields...)
- return b
-}
-
-// Version defines the explicit version number for concurrency control.
-func (b *UpdateService) Version(version int64) *UpdateService {
- b.version = &version
- return b
-}
-
-// VersionType is e.g. "internal".
-func (b *UpdateService) VersionType(versionType string) *UpdateService {
- b.versionType = versionType
- return b
-}
-
-// Refresh the index after performing the update.
-func (b *UpdateService) Refresh(refresh string) *UpdateService {
- b.refresh = refresh
- return b
-}
-
-// WaitForActiveShards sets the number of shard copies that must be active before
-// proceeding with the update operation. Defaults to 1, meaning the primary shard only.
-// Set to `all` for all shard copies, otherwise set to any non-negative value less than
-// or equal to the total number of copies for the shard (number of replicas + 1).
-func (b *UpdateService) WaitForActiveShards(waitForActiveShards string) *UpdateService {
- b.waitForActiveShards = waitForActiveShards
- return b
-}
-
-// Doc allows for updating the document with a partial document.
-func (b *UpdateService) Doc(doc interface{}) *UpdateService {
- b.doc = doc
- return b
-}
-
-// Upsert can be used to index the document when it doesn't exist yet.
-// Use this e.g. to initialize a document with a default value.
-func (b *UpdateService) Upsert(doc interface{}) *UpdateService {
- b.upsert = doc
- return b
-}
-
-// DocAsUpsert can be used to insert the document if it doesn't already exist.
-func (b *UpdateService) DocAsUpsert(docAsUpsert bool) *UpdateService {
- b.docAsUpsert = &docAsUpsert
- return b
-}
-
-// DetectNoop will instruct Elasticsearch to check if changes will occur
-// when updating via Doc. If there aren't any changes, the request will
-// turn into a no-op.
-func (b *UpdateService) DetectNoop(detectNoop bool) *UpdateService {
- b.detectNoop = &detectNoop
- return b
-}
-
-// ScriptedUpsert should be set to true if the referenced script
-// (defined in Script or ScriptId) should be called to perform an insert.
-// The default is false.
-func (b *UpdateService) ScriptedUpsert(scriptedUpsert bool) *UpdateService {
- b.scriptedUpsert = &scriptedUpsert
- return b
-}
-
-// Timeout is an explicit timeout for the operation, e.g. "1000", "1s" or "500ms".
-func (b *UpdateService) Timeout(timeout string) *UpdateService {
- b.timeout = timeout
- return b
-}
-
-// Pretty instructs Elasticsearch to return human readable, prettified JSON.
-func (b *UpdateService) Pretty(pretty bool) *UpdateService {
- b.pretty = pretty
- return b
-}
-
-// FetchSource asks Elasticsearch to return the updated _source in the response.
-func (s *UpdateService) FetchSource(fetchSource bool) *UpdateService {
- if s.fsc == nil {
- s.fsc = NewFetchSourceContext(fetchSource)
- } else {
- s.fsc.SetFetchSource(fetchSource)
- }
- return s
-}
-
-// FetchSourceContext indicates that _source should be returned in the response,
-// allowing wildcard patterns to be defined via FetchSourceContext.
-func (s *UpdateService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *UpdateService {
- s.fsc = fetchSourceContext
- return s
-}
-
-// url returns the URL part of the document request.
-func (b *UpdateService) url() (string, url.Values, error) {
- // Build url
- path := "/{index}/{type}/{id}/_update"
- path, err := uritemplates.Expand(path, map[string]string{
- "index": b.index,
- "type": b.typ,
- "id": b.id,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Parameters
- params := make(url.Values)
- if b.pretty {
- params.Set("pretty", "true")
- }
- if b.routing != "" {
- params.Set("routing", b.routing)
- }
- if b.parent != "" {
- params.Set("parent", b.parent)
- }
- if b.timeout != "" {
- params.Set("timeout", b.timeout)
- }
- if b.refresh != "" {
- params.Set("refresh", b.refresh)
- }
- if b.waitForActiveShards != "" {
- params.Set("wait_for_active_shards", b.waitForActiveShards)
- }
- if len(b.fields) > 0 {
- params.Set("fields", strings.Join(b.fields, ","))
- }
- if b.version != nil {
- params.Set("version", fmt.Sprintf("%d", *b.version))
- }
- if b.versionType != "" {
- params.Set("version_type", b.versionType)
- }
- if b.retryOnConflict != nil {
- params.Set("retry_on_conflict", fmt.Sprintf("%v", *b.retryOnConflict))
- }
-
- return path, params, nil
-}
-
-// body returns the body part of the document request.
-func (b *UpdateService) body() (interface{}, error) {
- source := make(map[string]interface{})
-
- if b.script != nil {
- src, err := b.script.Source()
- if err != nil {
- return nil, err
- }
- source["script"] = src
- }
-
- if b.scriptedUpsert != nil {
- source["scripted_upsert"] = *b.scriptedUpsert
- }
-
- if b.upsert != nil {
- source["upsert"] = b.upsert
- }
-
- if b.doc != nil {
- source["doc"] = b.doc
- }
- if b.docAsUpsert != nil {
- source["doc_as_upsert"] = *b.docAsUpsert
- }
- if b.detectNoop != nil {
- source["detect_noop"] = *b.detectNoop
- }
- if b.fsc != nil {
- src, err := b.fsc.Source()
- if err != nil {
- return nil, err
- }
- source["_source"] = src
- }
-
- return source, nil
-}
-
-// Do executes the update operation.
-func (b *UpdateService) Do(ctx context.Context) (*UpdateResponse, error) {
- path, params, err := b.url()
- if err != nil {
- return nil, err
- }
-
- // Get body of the request
- body, err := b.body()
- if err != nil {
- return nil, err
- }
-
- // Get response
- res, err := b.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return result
- ret := new(UpdateResponse)
- if err := b.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// UpdateResponse is the result of updating a document in Elasticsearch.
-type UpdateResponse struct {
- Index string `json:"_index,omitempty"`
- Type string `json:"_type,omitempty"`
- Id string `json:"_id,omitempty"`
- Version int64 `json:"_version,omitempty"`
- Result string `json:"result,omitempty"`
- Shards *shardsInfo `json:"_shards,omitempty"`
- SeqNo int64 `json:"_seq_no,omitempty"`
- PrimaryTerm int64 `json:"_primary_term,omitempty"`
- Status int `json:"status,omitempty"`
- ForcedRefresh bool `json:"forced_refresh,omitempty"`
- GetResult *GetResult `json:"get,omitempty"`
-}
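
For reference, a minimal, self-contained sketch of how the UpdateService removed above is typically driven from application code. It only uses calls visible in this diff (Update, Index, Type, Id, Doc, DocAsUpsert, FetchSource, Do); the index, type, id and field values are illustrative placeholders, and elastic.NewClient() is assumed to reach a local Elasticsearch node.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/olivere/elastic"
    )

    func main() {
        // Assumes an Elasticsearch node reachable at the default http://127.0.0.1:9200.
        client, err := elastic.NewClient()
        if err != nil {
            log.Fatal(err)
        }

        // Partial update: merge the given fields into _source, create the document
        // if it does not exist (doc_as_upsert), and return the updated _source.
        res, err := client.Update().
            Index("test").Type("doc").Id("1").
            Doc(map[string]interface{}{"user": "sandrae"}).
            DocAsUpsert(true).
            FetchSource(true).
            Do(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("result=%s version=%d\n", res.Result, res.Version)
    }
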
diff --git a/vendor/github.com/olivere/elastic/update_by_query.go b/vendor/github.com/olivere/elastic/update_by_query.go
deleted file mode 100644
index 953d67388..000000000
--- a/vendor/github.com/olivere/elastic/update_by_query.go
+++ /dev/null
@@ -1,655 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/olivere/elastic/uritemplates"
-)
-
-// UpdateByQueryService is documented at https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html.
-type UpdateByQueryService struct {
- client *Client
- pretty bool
- index []string
- typ []string
- script *Script
- query Query
- body interface{}
- xSource []string
- xSourceExclude []string
- xSourceInclude []string
- allowNoIndices *bool
- analyzeWildcard *bool
- analyzer string
- conflicts string
- defaultOperator string
- docvalueFields []string
- df string
- expandWildcards string
- explain *bool
- fielddataFields []string
- from *int
- ignoreUnavailable *bool
- lenient *bool
- lowercaseExpandedTerms *bool
- pipeline string
- preference string
- q string
- refresh string
- requestCache *bool
- requestsPerSecond *int
- routing []string
- scroll string
- scrollSize *int
- searchTimeout string
- searchType string
- size *int
- sort []string
- stats []string
- storedFields []string
- suggestField string
- suggestMode string
- suggestSize *int
- suggestText string
- terminateAfter *int
- timeout string
- trackScores *bool
- version *bool
- versionType *bool
- waitForActiveShards string
- waitForCompletion *bool
-}
-
-// NewUpdateByQueryService creates a new UpdateByQueryService.
-func NewUpdateByQueryService(client *Client) *UpdateByQueryService {
- return &UpdateByQueryService{
- client: client,
- }
-}
-
-// Index is a list of index names to search; use `_all` or empty string to
-// perform the operation on all indices.
-func (s *UpdateByQueryService) Index(index ...string) *UpdateByQueryService {
- s.index = append(s.index, index...)
- return s
-}
-
-// Type is a list of document types to search; leave empty to perform
-// the operation on all types.
-func (s *UpdateByQueryService) Type(typ ...string) *UpdateByQueryService {
- s.typ = append(s.typ, typ...)
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *UpdateByQueryService) Pretty(pretty bool) *UpdateByQueryService {
- s.pretty = pretty
- return s
-}
-
-// Script sets an update script.
-func (s *UpdateByQueryService) Script(script *Script) *UpdateByQueryService {
- s.script = script
- return s
-}
-
-// Body specifies the raw body of the request. If set, it takes precedence over
-// anything specified via Query or Script.
-func (s *UpdateByQueryService) Body(body string) *UpdateByQueryService {
- s.body = body
- return s
-}
-
-// XSource controls whether the _source field is returned ("true" or "false"),
-// or specifies a list of fields to return.
-func (s *UpdateByQueryService) XSource(xSource ...string) *UpdateByQueryService {
- s.xSource = append(s.xSource, xSource...)
- return s
-}
-
-// XSourceExclude represents a list of fields to exclude from the returned _source field.
-func (s *UpdateByQueryService) XSourceExclude(xSourceExclude ...string) *UpdateByQueryService {
- s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
- return s
-}
-
-// XSourceInclude represents a list of fields to extract and return from the _source field.
-func (s *UpdateByQueryService) XSourceInclude(xSourceInclude ...string) *UpdateByQueryService {
- s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices expression
-// resolves into no concrete indices. (This includes `_all` string or when
-// no indices have been specified).
-func (s *UpdateByQueryService) AllowNoIndices(allowNoIndices bool) *UpdateByQueryService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// AnalyzeWildcard specifies whether wildcard and prefix queries should be
-// analyzed (default: false).
-func (s *UpdateByQueryService) AnalyzeWildcard(analyzeWildcard bool) *UpdateByQueryService {
- s.analyzeWildcard = &analyzeWildcard
- return s
-}
-
-// Analyzer specifies the analyzer to use for the query string.
-func (s *UpdateByQueryService) Analyzer(analyzer string) *UpdateByQueryService {
- s.analyzer = analyzer
- return s
-}
-
-// Conflicts indicates what to do when the process detects version conflicts.
-// Possible values are "proceed" and "abort".
-func (s *UpdateByQueryService) Conflicts(conflicts string) *UpdateByQueryService {
- s.conflicts = conflicts
- return s
-}
-
-// AbortOnVersionConflict aborts the request on version conflicts.
-// It is an alias to setting Conflicts("abort").
-func (s *UpdateByQueryService) AbortOnVersionConflict() *UpdateByQueryService {
- s.conflicts = "abort"
- return s
-}
-
-// ProceedOnVersionConflict causes the request to proceed despite version conflicts.
-// It is an alias to setting Conflicts("proceed").
-func (s *UpdateByQueryService) ProceedOnVersionConflict() *UpdateByQueryService {
- s.conflicts = "proceed"
- return s
-}
-
-// DefaultOperator is the default operator for query string query (AND or OR).
-func (s *UpdateByQueryService) DefaultOperator(defaultOperator string) *UpdateByQueryService {
- s.defaultOperator = defaultOperator
- return s
-}
-
-// DF specifies the field to use as default where no field prefix is given in the query string.
-func (s *UpdateByQueryService) DF(df string) *UpdateByQueryService {
- s.df = df
- return s
-}
-
-// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit.
-func (s *UpdateByQueryService) DocvalueFields(docvalueFields ...string) *UpdateByQueryService {
- s.docvalueFields = docvalueFields
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expressions to
-// concrete indices that are open, closed or both.
-func (s *UpdateByQueryService) ExpandWildcards(expandWildcards string) *UpdateByQueryService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// Explain specifies whether to return detailed information about score
-// computation as part of a hit.
-func (s *UpdateByQueryService) Explain(explain bool) *UpdateByQueryService {
- s.explain = &explain
- return s
-}
-
-// FielddataFields is a list of fields to return as the field data
-// representation of a field for each hit.
-func (s *UpdateByQueryService) FielddataFields(fielddataFields ...string) *UpdateByQueryService {
- s.fielddataFields = append(s.fielddataFields, fielddataFields...)
- return s
-}
-
-// From is the starting offset (default: 0).
-func (s *UpdateByQueryService) From(from int) *UpdateByQueryService {
- s.from = &from
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *UpdateByQueryService) IgnoreUnavailable(ignoreUnavailable bool) *UpdateByQueryService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// Lenient specifies whether format-based query failures
-// (such as providing text to a numeric field) should be ignored.
-func (s *UpdateByQueryService) Lenient(lenient bool) *UpdateByQueryService {
- s.lenient = &lenient
- return s
-}
-
-// LowercaseExpandedTerms specifies whether query terms should be lowercased.
-func (s *UpdateByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *UpdateByQueryService {
- s.lowercaseExpandedTerms = &lowercaseExpandedTerms
- return s
-}
-
-// Pipeline specifies the ingest pipeline to set on index requests made by this action (default: none).
-func (s *UpdateByQueryService) Pipeline(pipeline string) *UpdateByQueryService {
- s.pipeline = pipeline
- return s
-}
-
-// Preference specifies the node or shard the operation should be performed on
-// (default: random).
-func (s *UpdateByQueryService) Preference(preference string) *UpdateByQueryService {
- s.preference = preference
- return s
-}
-
-// Q specifies the query in the Lucene query string syntax.
-func (s *UpdateByQueryService) Q(q string) *UpdateByQueryService {
- s.q = q
- return s
-}
-
-// Query sets a query definition using the Query DSL.
-func (s *UpdateByQueryService) Query(query Query) *UpdateByQueryService {
- s.query = query
- return s
-}
-
-// Refresh indicates whether the affected indices should be refreshed.
-func (s *UpdateByQueryService) Refresh(refresh string) *UpdateByQueryService {
- s.refresh = refresh
- return s
-}
-
-// RequestCache specifies if request cache should be used for this request
-// or not, defaults to index level setting.
-func (s *UpdateByQueryService) RequestCache(requestCache bool) *UpdateByQueryService {
- s.requestCache = &requestCache
- return s
-}
-
-// RequestsPerSecond sets the throttle on this request in sub-requests per second.
-// -1 disables throttling; in the REST API, "unlimited" is the only accepted non-numeric value.
-func (s *UpdateByQueryService) RequestsPerSecond(requestsPerSecond int) *UpdateByQueryService {
- s.requestsPerSecond = &requestsPerSecond
- return s
-}
-
-// Routing is a list of specific routing values.
-func (s *UpdateByQueryService) Routing(routing ...string) *UpdateByQueryService {
- s.routing = append(s.routing, routing...)
- return s
-}
-
-// Scroll specifies how long a consistent view of the index should be maintained
-// for scrolled search.
-func (s *UpdateByQueryService) Scroll(scroll string) *UpdateByQueryService {
- s.scroll = scroll
- return s
-}
-
-// ScrollSize is the size of the scroll request that powers the update_by_query.
-func (s *UpdateByQueryService) ScrollSize(scrollSize int) *UpdateByQueryService {
- s.scrollSize = &scrollSize
- return s
-}
-
-// SearchTimeout defines an explicit timeout for each search request.
-// Defaults to no timeout.
-func (s *UpdateByQueryService) SearchTimeout(searchTimeout string) *UpdateByQueryService {
- s.searchTimeout = searchTimeout
- return s
-}
-
-// SearchType is the search operation type. Possible values are
-// "query_then_fetch" and "dfs_query_then_fetch".
-func (s *UpdateByQueryService) SearchType(searchType string) *UpdateByQueryService {
- s.searchType = searchType
- return s
-}
-
-// Size represents the number of hits to return (default: 10).
-func (s *UpdateByQueryService) Size(size int) *UpdateByQueryService {
- s.size = &size
- return s
-}
-
-// Sort is a list of <field>:<direction> pairs.
-func (s *UpdateByQueryService) Sort(sort ...string) *UpdateByQueryService {
- s.sort = append(s.sort, sort...)
- return s
-}
-
-// SortByField adds a sort order.
-func (s *UpdateByQueryService) SortByField(field string, ascending bool) *UpdateByQueryService {
- if ascending {
- s.sort = append(s.sort, fmt.Sprintf("%s:asc", field))
- } else {
- s.sort = append(s.sort, fmt.Sprintf("%s:desc", field))
- }
- return s
-}
-
-// Stats specifies specific tag(s) of the request for logging and statistical purposes.
-func (s *UpdateByQueryService) Stats(stats ...string) *UpdateByQueryService {
- s.stats = append(s.stats, stats...)
- return s
-}
-
-// StoredFields specifies the list of stored fields to return as part of a hit.
-func (s *UpdateByQueryService) StoredFields(storedFields ...string) *UpdateByQueryService {
- s.storedFields = storedFields
- return s
-}
-
-// SuggestField specifies which field to use for suggestions.
-func (s *UpdateByQueryService) SuggestField(suggestField string) *UpdateByQueryService {
- s.suggestField = suggestField
- return s
-}
-
-// SuggestMode specifies the suggest mode. Possible values are
-// "missing", "popular", and "always".
-func (s *UpdateByQueryService) SuggestMode(suggestMode string) *UpdateByQueryService {
- s.suggestMode = suggestMode
- return s
-}
-
-// SuggestSize specifies how many suggestions to return in the response.
-func (s *UpdateByQueryService) SuggestSize(suggestSize int) *UpdateByQueryService {
- s.suggestSize = &suggestSize
- return s
-}
-
-// SuggestText specifies the source text for which the suggestions should be returned.
-func (s *UpdateByQueryService) SuggestText(suggestText string) *UpdateByQueryService {
- s.suggestText = suggestText
- return s
-}
-
-// TerminateAfter indicates the maximum number of documents to collect
-// for each shard, upon reaching which the query execution will terminate early.
-func (s *UpdateByQueryService) TerminateAfter(terminateAfter int) *UpdateByQueryService {
- s.terminateAfter = &terminateAfter
- return s
-}
-
-// Timeout is the time each individual bulk request should wait for shards
-// that are unavailable.
-func (s *UpdateByQueryService) Timeout(timeout string) *UpdateByQueryService {
- s.timeout = timeout
- return s
-}
-
-// TimeoutInMillis sets the timeout in milliseconds.
-func (s *UpdateByQueryService) TimeoutInMillis(timeoutInMillis int) *UpdateByQueryService {
- s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
- return s
-}
-
-// TrackScores indicates whether to calculate and return scores even if
-// they are not used for sorting.
-func (s *UpdateByQueryService) TrackScores(trackScores bool) *UpdateByQueryService {
- s.trackScores = &trackScores
- return s
-}
-
-// Version specifies whether to return document version as part of a hit.
-func (s *UpdateByQueryService) Version(version bool) *UpdateByQueryService {
- s.version = &version
- return s
-}
-
-// VersionType indicates whether the document should increment the version number
-// (internal) on hit or not (reindex).
-func (s *UpdateByQueryService) VersionType(versionType bool) *UpdateByQueryService {
- s.versionType = &versionType
- return s
-}
-
-// WaitForActiveShards sets the number of shard copies that must be active before proceeding
-// with the update by query operation. Defaults to 1, meaning the primary shard only.
-// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal
-// to the total number of copies for the shard (number of replicas + 1).
-func (s *UpdateByQueryService) WaitForActiveShards(waitForActiveShards string) *UpdateByQueryService {
- s.waitForActiveShards = waitForActiveShards
- return s
-}
-
-// WaitForCompletion indicates if the request should block until the update by query operation is complete.
-func (s *UpdateByQueryService) WaitForCompletion(waitForCompletion bool) *UpdateByQueryService {
- s.waitForCompletion = &waitForCompletion
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *UpdateByQueryService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
- if len(s.typ) > 0 {
- path, err = uritemplates.Expand("/{index}/{type}/_update_by_query", map[string]string{
- "index": strings.Join(s.index, ","),
- "type": strings.Join(s.typ, ","),
- })
- } else {
- path, err = uritemplates.Expand("/{index}/_update_by_query", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "true")
- }
- if len(s.xSource) > 0 {
- params.Set("_source", strings.Join(s.xSource, ","))
- }
- if len(s.xSourceExclude) > 0 {
- params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
- }
- if len(s.xSourceInclude) > 0 {
- params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
- }
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.analyzer != "" {
- params.Set("analyzer", s.analyzer)
- }
- if s.analyzeWildcard != nil {
- params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
- }
- if s.conflicts != "" {
- params.Set("conflicts", s.conflicts)
- }
- if s.defaultOperator != "" {
- params.Set("default_operator", s.defaultOperator)
- }
- if s.df != "" {
- params.Set("df", s.df)
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if s.explain != nil {
- params.Set("explain", fmt.Sprintf("%v", *s.explain))
- }
- if len(s.storedFields) > 0 {
- params.Set("stored_fields", strings.Join(s.storedFields, ","))
- }
- if len(s.docvalueFields) > 0 {
- params.Set("docvalue_fields", strings.Join(s.docvalueFields, ","))
- }
- if len(s.fielddataFields) > 0 {
- params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
- }
- if s.from != nil {
- params.Set("from", fmt.Sprintf("%d", *s.from))
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.lenient != nil {
- params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
- }
- if s.lowercaseExpandedTerms != nil {
- params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
- }
- if s.pipeline != "" {
- params.Set("pipeline", s.pipeline)
- }
- if s.preference != "" {
- params.Set("preference", s.preference)
- }
- if s.q != "" {
- params.Set("q", s.q)
- }
- if s.refresh != "" {
- params.Set("refresh", s.refresh)
- }
- if s.requestCache != nil {
- params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache))
- }
- if len(s.routing) > 0 {
- params.Set("routing", strings.Join(s.routing, ","))
- }
- if s.scroll != "" {
- params.Set("scroll", s.scroll)
- }
- if s.scrollSize != nil {
- params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize))
- }
- if s.searchTimeout != "" {
- params.Set("search_timeout", s.searchTimeout)
- }
- if s.searchType != "" {
- params.Set("search_type", s.searchType)
- }
- if s.size != nil {
- params.Set("size", fmt.Sprintf("%d", *s.size))
- }
- if len(s.sort) > 0 {
- params.Set("sort", strings.Join(s.sort, ","))
- }
- if len(s.stats) > 0 {
- params.Set("stats", strings.Join(s.stats, ","))
- }
- if s.suggestField != "" {
- params.Set("suggest_field", s.suggestField)
- }
- if s.suggestMode != "" {
- params.Set("suggest_mode", s.suggestMode)
- }
- if s.suggestSize != nil {
- params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize))
- }
- if s.suggestText != "" {
- params.Set("suggest_text", s.suggestText)
- }
- if s.terminateAfter != nil {
- params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter))
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.trackScores != nil {
- params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores))
- }
- if s.version != nil {
- params.Set("version", fmt.Sprintf("%v", *s.version))
- }
- if s.versionType != nil {
- params.Set("version_type", fmt.Sprintf("%v", *s.versionType))
- }
- if s.waitForActiveShards != "" {
- params.Set("wait_for_active_shards", s.waitForActiveShards)
- }
- if s.waitForCompletion != nil {
- params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
- }
- if s.requestsPerSecond != nil {
- params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *UpdateByQueryService) Validate() error {
- var invalid []string
- if len(s.index) == 0 {
- invalid = append(invalid, "Index")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// getBody returns the body part of the document request.
-func (s *UpdateByQueryService) getBody() (interface{}, error) {
- if s.body != nil {
- return s.body, nil
- }
- source := make(map[string]interface{})
- if s.script != nil {
- src, err := s.script.Source()
- if err != nil {
- return nil, err
- }
- source["script"] = src
- }
- if s.query != nil {
- src, err := s.query.Source()
- if err != nil {
- return nil, err
- }
- source["query"] = src
- }
- return source, nil
-}
-
-// Do executes the operation.
-func (s *UpdateByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- body, err := s.getBody()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
- Method: "POST",
- Path: path,
- Params: params,
- Body: body,
- })
- if err != nil {
- return nil, err
- }
-
- // Return operation response (BulkIndexByScrollResponse is defined in DeleteByQuery)
- ret := new(BulkIndexByScrollResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
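
To complement the service removed above, a short usage sketch that mirrors the tests which follow: it updates every document matching a term query via an inline script and proceeds on version conflicts. The index name, field and script are placeholders; NewTermQuery, NewScriptInline, ProceedOnVersionConflict and the Updated counter of BulkIndexByScrollResponse are all referenced elsewhere in this diff.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/olivere/elastic"
    )

    func main() {
        client, err := elastic.NewClient() // assumes a local Elasticsearch node
        if err != nil {
            log.Fatal(err)
        }

        // Increment a counter on every document matching the term query;
        // version conflicts are skipped rather than aborting the whole run.
        res, err := client.UpdateByQuery("tweets").
            Query(elastic.NewTermQuery("user", "olivere")).
            Script(elastic.NewScriptInline("ctx._source.likes++")).
            ProceedOnVersionConflict().
            Do(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("updated %d documents\n", res.Updated)
    }
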
diff --git a/vendor/github.com/olivere/elastic/update_by_query_test.go b/vendor/github.com/olivere/elastic/update_by_query_test.go
deleted file mode 100644
index fde924dd5..000000000
--- a/vendor/github.com/olivere/elastic/update_by_query_test.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "testing"
-)
-
-func TestUpdateByQueryBuildURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Indices []string
- Types []string
- Expected string
- ExpectErr bool
- }{
- {
- []string{},
- []string{},
- "",
- true,
- },
- {
- []string{"index1"},
- []string{},
- "/index1/_update_by_query",
- false,
- },
- {
- []string{"index1", "index2"},
- []string{},
- "/index1%2Cindex2/_update_by_query",
- false,
- },
- {
- []string{},
- []string{"type1"},
- "",
- true,
- },
- {
- []string{"index1"},
- []string{"type1"},
- "/index1/type1/_update_by_query",
- false,
- },
- {
- []string{"index1", "index2"},
- []string{"type1", "type2"},
- "/index1%2Cindex2/type1%2Ctype2/_update_by_query",
- false,
- },
- }
-
- for i, test := range tests {
- builder := client.UpdateByQuery().Index(test.Indices...).Type(test.Types...)
- err := builder.Validate()
- if err != nil {
- if !test.ExpectErr {
- t.Errorf("case #%d: %v", i+1, err)
- continue
- }
- } else {
- // err == nil
- if test.ExpectErr {
- t.Errorf("case #%d: expected error", i+1)
- continue
- }
- path, _, _ := builder.buildURL()
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
- }
-}
-
-func TestUpdateByQueryBodyWithQuery(t *testing.T) {
- client := setupTestClient(t)
- out, err := client.UpdateByQuery().Query(NewTermQuery("user", "olivere")).getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"query":{"term":{"user":"olivere"}}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestUpdateByQueryBodyWithQueryAndScript(t *testing.T) {
- client := setupTestClient(t)
- out, err := client.UpdateByQuery().
- Query(NewTermQuery("user", "olivere")).
- Script(NewScriptInline("ctx._source.likes++")).
- getBody()
- if err != nil {
- t.Fatal(err)
- }
- b, err := json.Marshal(out)
- if err != nil {
- t.Fatal(err)
- }
- got := string(b)
- want := `{"query":{"term":{"user":"olivere"}},"script":{"source":"ctx._source.likes++"}}`
- if got != want {
- t.Fatalf("\ngot %s\nwant %s", got, want)
- }
-}
-
-func TestUpdateByQuery(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
- if esversion < "2.3.0" {
- t.Skipf("Elasticsearch %v does not support update-by-query yet", esversion)
- }
-
- sourceCount, err := client.Count(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if sourceCount <= 0 {
- t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
- }
-
- res, err := client.UpdateByQuery(testIndexName).ProceedOnVersionConflict().Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("response is nil")
- }
- if res.Updated != sourceCount {
- t.Fatalf("expected %d; got: %d", sourceCount, res.Updated)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/update_integration_test.go b/vendor/github.com/olivere/elastic/update_integration_test.go
deleted file mode 100644
index f36925298..000000000
--- a/vendor/github.com/olivere/elastic/update_integration_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "testing"
-)
-
-func TestUpdateWithScript(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- // Get original
- getRes, err := client.Get().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- var original tweet
- if err := json.Unmarshal(*getRes.Source, &original); err != nil {
- t.Fatal(err)
- }
-
- // Update with script
- updRes, err := client.Update().Index(testIndexName).Type("doc").Id("1").
- Script(
- NewScript(`ctx._source.message = "Updated message text."`).Lang("painless"),
- ).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if updRes == nil {
- t.Fatal("response is nil")
- }
- if want, have := "updated", updRes.Result; want != have {
- t.Fatalf("want Result = %q, have %v", want, have)
- }
-
- // Get new version
- getRes, err = client.Get().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- var updated tweet
- if err := json.Unmarshal(*getRes.Source, &updated); err != nil {
- t.Fatal(err)
- }
-
- if want, have := original.User, updated.User; want != have {
- t.Fatalf("want User = %q, have %v", want, have)
- }
- if want, have := "Updated message text.", updated.Message; want != have {
- t.Fatalf("want Message = %q, have %v", want, have)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/update_test.go b/vendor/github.com/olivere/elastic/update_test.go
deleted file mode 100644
index 1f04cedd6..000000000
--- a/vendor/github.com/olivere/elastic/update_test.go
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "net/url"
- "testing"
-)
-
-func TestUpdateViaScript(t *testing.T) {
- client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- update := client.Update().
- Index("test").Type("type1").Id("1").
- Script(NewScript("ctx._source.tags += tag").Params(map[string]interface{}{"tag": "blue"}).Lang("groovy"))
- path, params, err := update.url()
- if err != nil {
- t.Fatalf("expected to return URL, got: %v", err)
- }
- expectedPath := `/test/type1/1/_update`
- if expectedPath != path {
- t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
- }
- expectedParams := url.Values{}
- if expectedParams.Encode() != params.Encode() {
- t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
- }
- body, err := update.body()
- if err != nil {
- t.Fatalf("expected to return body, got: %v", err)
- }
- data, err := json.Marshal(body)
- if err != nil {
- t.Fatalf("expected to marshal body as JSON, got: %v", err)
- }
- got := string(data)
- expected := `{"script":{"lang":"groovy","params":{"tag":"blue"},"source":"ctx._source.tags += tag"}}`
- if got != expected {
- t.Errorf("expected\n%s\ngot:\n%s", expected, got)
- }
-}
-
-func TestUpdateViaScriptId(t *testing.T) {
- client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- scriptParams := map[string]interface{}{
- "pageViewEvent": map[string]interface{}{
- "url": "foo.com/bar",
- "response": 404,
- "time": "2014-01-01 12:32",
- },
- }
- script := NewScriptStored("my_web_session_summariser").Params(scriptParams)
-
- update := client.Update().
- Index("sessions").Type("session").Id("dh3sgudg8gsrgl").
- Script(script).
- ScriptedUpsert(true).
- Upsert(map[string]interface{}{})
- path, params, err := update.url()
- if err != nil {
- t.Fatalf("expected to return URL, got: %v", err)
- }
- expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update`
- if expectedPath != path {
- t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
- }
- expectedParams := url.Values{}
- if expectedParams.Encode() != params.Encode() {
- t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
- }
- body, err := update.body()
- if err != nil {
- t.Fatalf("expected to return body, got: %v", err)
- }
- data, err := json.Marshal(body)
- if err != nil {
- t.Fatalf("expected to marshal body as JSON, got: %v", err)
- }
- got := string(data)
- expected := `{"script":{"id":"my_web_session_summariser","params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}}},"scripted_upsert":true,"upsert":{}}`
- if got != expected {
- t.Errorf("expected\n%s\ngot:\n%s", expected, got)
- }
-}
-
-func TestUpdateViaScriptAndUpsert(t *testing.T) {
- client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- update := client.Update().
- Index("test").Type("type1").Id("1").
- Script(NewScript("ctx._source.counter += count").Params(map[string]interface{}{"count": 4})).
- Upsert(map[string]interface{}{"counter": 1})
- path, params, err := update.url()
- if err != nil {
- t.Fatalf("expected to return URL, got: %v", err)
- }
- expectedPath := `/test/type1/1/_update`
- if expectedPath != path {
- t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
- }
- expectedParams := url.Values{}
- if expectedParams.Encode() != params.Encode() {
- t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
- }
- body, err := update.body()
- if err != nil {
- t.Fatalf("expected to return body, got: %v", err)
- }
- data, err := json.Marshal(body)
- if err != nil {
- t.Fatalf("expected to marshal body as JSON, got: %v", err)
- }
- got := string(data)
- expected := `{"script":{"params":{"count":4},"source":"ctx._source.counter += count"},"upsert":{"counter":1}}`
- if got != expected {
- t.Errorf("expected\n%s\ngot:\n%s", expected, got)
- }
-}
-
-func TestUpdateViaDoc(t *testing.T) {
- client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- update := client.Update().
- Index("test").Type("type1").Id("1").
- Doc(map[string]interface{}{"name": "new_name"}).
- DetectNoop(true)
- path, params, err := update.url()
- if err != nil {
- t.Fatalf("expected to return URL, got: %v", err)
- }
- expectedPath := `/test/type1/1/_update`
- if expectedPath != path {
- t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
- }
- expectedParams := url.Values{}
- if expectedParams.Encode() != params.Encode() {
- t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
- }
- body, err := update.body()
- if err != nil {
- t.Fatalf("expected to return body, got: %v", err)
- }
- data, err := json.Marshal(body)
- if err != nil {
- t.Fatalf("expected to marshal body as JSON, got: %v", err)
- }
- got := string(data)
- expected := `{"detect_noop":true,"doc":{"name":"new_name"}}`
- if got != expected {
- t.Errorf("expected\n%s\ngot:\n%s", expected, got)
- }
-}
-
-func TestUpdateViaDocAndUpsert(t *testing.T) {
- client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- update := client.Update().
- Index("test").Type("type1").Id("1").
- Doc(map[string]interface{}{"name": "new_name"}).
- DocAsUpsert(true).
- Timeout("1s").
- Refresh("true")
- path, params, err := update.url()
- if err != nil {
- t.Fatalf("expected to return URL, got: %v", err)
- }
- expectedPath := `/test/type1/1/_update`
- if expectedPath != path {
- t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
- }
- expectedParams := url.Values{"refresh": []string{"true"}, "timeout": []string{"1s"}}
- if expectedParams.Encode() != params.Encode() {
- t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
- }
- body, err := update.body()
- if err != nil {
- t.Fatalf("expected to return body, got: %v", err)
- }
- data, err := json.Marshal(body)
- if err != nil {
- t.Fatalf("expected to marshal body as JSON, got: %v", err)
- }
- got := string(data)
- expected := `{"doc":{"name":"new_name"},"doc_as_upsert":true}`
- if got != expected {
- t.Errorf("expected\n%s\ngot:\n%s", expected, got)
- }
-}
-
-func TestUpdateViaDocAndUpsertAndFetchSource(t *testing.T) {
- client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- update := client.Update().
- Index("test").Type("type1").Id("1").
- Doc(map[string]interface{}{"name": "new_name"}).
- DocAsUpsert(true).
- Timeout("1s").
- Refresh("true").
- FetchSource(true)
- path, params, err := update.url()
- if err != nil {
- t.Fatalf("expected to return URL, got: %v", err)
- }
- expectedPath := `/test/type1/1/_update`
- if expectedPath != path {
- t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
- }
- expectedParams := url.Values{
- "refresh": []string{"true"},
- "timeout": []string{"1s"},
- }
- if expectedParams.Encode() != params.Encode() {
- t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
- }
- body, err := update.body()
- if err != nil {
- t.Fatalf("expected to return body, got: %v", err)
- }
- data, err := json.Marshal(body)
- if err != nil {
- t.Fatalf("expected to marshal body as JSON, got: %v", err)
- }
- got := string(data)
- expected := `{"_source":true,"doc":{"name":"new_name"},"doc_as_upsert":true}`
- if got != expected {
- t.Errorf("expected\n%s\ngot:\n%s", expected, got)
- }
-}
-
-func TestUpdateAndFetchSource(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
-
- res, err := client.Update().
- Index(testIndexName).Type("doc").Id("1").
- Doc(map[string]interface{}{"user": "sandrae"}).
- DetectNoop(true).
- FetchSource(true).
- Do(context.Background())
- if err != nil {
- t.Fatal(err)
- }
- if res == nil {
- t.Fatal("expected response != nil")
- }
- if res.GetResult == nil {
- t.Fatal("expected GetResult != nil")
- }
- data, err := json.Marshal(res.GetResult.Source)
- if err != nil {
- t.Fatalf("expected to marshal body as JSON, got: %v", err)
- }
- got := string(data)
- expected := `{"user":"sandrae","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"}`
- if got != expected {
- t.Errorf("expected\n%s\ngot:\n%s", expected, got)
- }
-}
diff --git a/vendor/github.com/olivere/elastic/uritemplates/LICENSE b/vendor/github.com/olivere/elastic/uritemplates/LICENSE
deleted file mode 100644
index de9c88cb6..000000000
--- a/vendor/github.com/olivere/elastic/uritemplates/LICENSE
+++ /dev/null
@@ -1,18 +0,0 @@
-Copyright (c) 2013 Joshua Tacoma
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/olivere/elastic/uritemplates/uritemplates.go b/vendor/github.com/olivere/elastic/uritemplates/uritemplates.go
deleted file mode 100644
index 8a84813fe..000000000
--- a/vendor/github.com/olivere/elastic/uritemplates/uritemplates.go
+++ /dev/null
@@ -1,359 +0,0 @@
-// Copyright 2013 Joshua Tacoma. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package uritemplates is a level 4 implementation of RFC 6570 (URI
-// Template, http://tools.ietf.org/html/rfc6570).
-//
-// To use uritemplates, parse a template string and expand it with a value
-// map:
-//
-// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
-// values := make(map[string]interface{})
-// values["user"] = "jtacoma"
-// values["repo"] = "uritemplates"
-// expanded, _ := template.Expand(values)
-// fmt.Println(expanded)
-//
-package uritemplates
-
-import (
- "bytes"
- "errors"
- "fmt"
- "reflect"
- "regexp"
- "strconv"
- "strings"
-)
-
-var (
- unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
- reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
- validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
- hex = []byte("0123456789ABCDEF")
-)
-
-func pctEncode(src []byte) []byte {
- dst := make([]byte, len(src)*3)
- for i, b := range src {
- buf := dst[i*3 : i*3+3]
- buf[0] = 0x25
- buf[1] = hex[b/16]
- buf[2] = hex[b%16]
- }
- return dst
-}
-
-func escape(s string, allowReserved bool) (escaped string) {
- if allowReserved {
- escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
- } else {
- escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
- }
- return escaped
-}
-
-// A UriTemplate is a parsed representation of a URI template.
-type UriTemplate struct {
- raw string
- parts []templatePart
-}
-
-// Parse parses a URI template string into a UriTemplate object.
-func Parse(rawtemplate string) (template *UriTemplate, err error) {
- template = new(UriTemplate)
- template.raw = rawtemplate
- split := strings.Split(rawtemplate, "{")
- template.parts = make([]templatePart, len(split)*2-1)
- for i, s := range split {
- if i == 0 {
- if strings.Contains(s, "}") {
- err = errors.New("unexpected }")
- break
- }
- template.parts[i].raw = s
- } else {
- subsplit := strings.Split(s, "}")
- if len(subsplit) != 2 {
- err = errors.New("malformed template")
- break
- }
- expression := subsplit[0]
- template.parts[i*2-1], err = parseExpression(expression)
- if err != nil {
- break
- }
- template.parts[i*2].raw = subsplit[1]
- }
- }
- if err != nil {
- template = nil
- }
- return template, err
-}
-
-type templatePart struct {
- raw string
- terms []templateTerm
- first string
- sep string
- named bool
- ifemp string
- allowReserved bool
-}
-
-type templateTerm struct {
- name string
- explode bool
- truncate int
-}
-
-func parseExpression(expression string) (result templatePart, err error) {
- switch expression[0] {
- case '+':
- result.sep = ","
- result.allowReserved = true
- expression = expression[1:]
- case '.':
- result.first = "."
- result.sep = "."
- expression = expression[1:]
- case '/':
- result.first = "/"
- result.sep = "/"
- expression = expression[1:]
- case ';':
- result.first = ";"
- result.sep = ";"
- result.named = true
- expression = expression[1:]
- case '?':
- result.first = "?"
- result.sep = "&"
- result.named = true
- result.ifemp = "="
- expression = expression[1:]
- case '&':
- result.first = "&"
- result.sep = "&"
- result.named = true
- result.ifemp = "="
- expression = expression[1:]
- case '#':
- result.first = "#"
- result.sep = ","
- result.allowReserved = true
- expression = expression[1:]
- default:
- result.sep = ","
- }
- rawterms := strings.Split(expression, ",")
- result.terms = make([]templateTerm, len(rawterms))
- for i, raw := range rawterms {
- result.terms[i], err = parseTerm(raw)
- if err != nil {
- break
- }
- }
- return result, err
-}
-
-func parseTerm(term string) (result templateTerm, err error) {
- if strings.HasSuffix(term, "*") {
- result.explode = true
- term = term[:len(term)-1]
- }
- split := strings.Split(term, ":")
- if len(split) == 1 {
- result.name = term
- } else if len(split) == 2 {
- result.name = split[0]
- var parsed int64
- parsed, err = strconv.ParseInt(split[1], 10, 0)
- result.truncate = int(parsed)
- } else {
- err = errors.New("multiple colons in same term")
- }
- if !validname.MatchString(result.name) {
- err = errors.New("not a valid name: " + result.name)
- }
- if result.explode && result.truncate > 0 {
- err = errors.New("both explode and prefix modifiers on same term")
- }
- return result, err
-}
-
-// Expand expands a URI template with a set of values to produce a string.
-func (self *UriTemplate) Expand(value interface{}) (string, error) {
- values, ismap := value.(map[string]interface{})
- if !ismap {
- if m, ismap := struct2map(value); !ismap {
- return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.")
- } else {
- return self.Expand(m)
- }
- }
- var buf bytes.Buffer
- for _, p := range self.parts {
- err := p.expand(&buf, values)
- if err != nil {
- return "", err
- }
- }
- return buf.String(), nil
-}
-
-func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {
- if len(self.raw) > 0 {
- buf.WriteString(self.raw)
- return nil
- }
- var zeroLen = buf.Len()
- buf.WriteString(self.first)
- var firstLen = buf.Len()
- for _, term := range self.terms {
- value, exists := values[term.name]
- if !exists {
- continue
- }
- if buf.Len() != firstLen {
- buf.WriteString(self.sep)
- }
- switch v := value.(type) {
- case string:
- self.expandString(buf, term, v)
- case []interface{}:
- self.expandArray(buf, term, v)
- case map[string]interface{}:
- if term.truncate > 0 {
- return errors.New("cannot truncate a map expansion")
- }
- self.expandMap(buf, term, v)
- default:
- if m, ismap := struct2map(value); ismap {
- if term.truncate > 0 {
- return errors.New("cannot truncate a map expansion")
- }
- self.expandMap(buf, term, m)
- } else {
- str := fmt.Sprintf("%v", value)
- self.expandString(buf, term, str)
- }
- }
- }
- if buf.Len() == firstLen {
- original := buf.Bytes()[:zeroLen]
- buf.Reset()
- buf.Write(original)
- }
- return nil
-}
-
-func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
- if self.named {
- buf.WriteString(name)
- if empty {
- buf.WriteString(self.ifemp)
- } else {
- buf.WriteString("=")
- }
- }
-}
-
-func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
- if len(s) > t.truncate && t.truncate > 0 {
- s = s[:t.truncate]
- }
- self.expandName(buf, t.name, len(s) == 0)
- buf.WriteString(escape(s, self.allowReserved))
-}
-
-func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
- if len(a) == 0 {
- return
- } else if !t.explode {
- self.expandName(buf, t.name, false)
- }
- for i, value := range a {
- if t.explode && i > 0 {
- buf.WriteString(self.sep)
- } else if i > 0 {
- buf.WriteString(",")
- }
- var s string
- switch v := value.(type) {
- case string:
- s = v
- default:
- s = fmt.Sprintf("%v", v)
- }
- if len(s) > t.truncate && t.truncate > 0 {
- s = s[:t.truncate]
- }
- if self.named && t.explode {
- self.expandName(buf, t.name, len(s) == 0)
- }
- buf.WriteString(escape(s, self.allowReserved))
- }
-}
-
-func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {
- if len(m) == 0 {
- return
- }
- if !t.explode {
- self.expandName(buf, t.name, len(m) == 0)
- }
- var firstLen = buf.Len()
- for k, value := range m {
- if firstLen != buf.Len() {
- if t.explode {
- buf.WriteString(self.sep)
- } else {
- buf.WriteString(",")
- }
- }
- var s string
- switch v := value.(type) {
- case string:
- s = v
- default:
- s = fmt.Sprintf("%v", v)
- }
- if t.explode {
- buf.WriteString(escape(k, self.allowReserved))
- buf.WriteRune('=')
- buf.WriteString(escape(s, self.allowReserved))
- } else {
- buf.WriteString(escape(k, self.allowReserved))
- buf.WriteRune(',')
- buf.WriteString(escape(s, self.allowReserved))
- }
- }
-}
-
-func struct2map(v interface{}) (map[string]interface{}, bool) {
- value := reflect.ValueOf(v)
- switch value.Type().Kind() {
- case reflect.Ptr:
- return struct2map(value.Elem().Interface())
- case reflect.Struct:
- m := make(map[string]interface{})
- for i := 0; i < value.NumField(); i++ {
- tag := value.Type().Field(i).Tag
- var name string
- if strings.Contains(string(tag), ":") {
- name = tag.Get("uri")
- } else {
- name = strings.TrimSpace(string(tag))
- }
- if len(name) == 0 {
- name = value.Type().Field(i).Name
- }
- m[name] = value.Field(i).Interface()
- }
- return m, true
- }
- return nil, false
-}
diff --git a/vendor/github.com/olivere/elastic/uritemplates/utils.go b/vendor/github.com/olivere/elastic/uritemplates/utils.go
deleted file mode 100644
index 399ef4623..000000000
--- a/vendor/github.com/olivere/elastic/uritemplates/utils.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package uritemplates
-
-func Expand(path string, expansions map[string]string) (string, error) {
- template, err := Parse(path)
- if err != nil {
- return "", err
- }
- values := make(map[string]interface{})
- for k, v := range expansions {
- values[k] = v
- }
- return template.Expand(values)
-}
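
The Expand helper above is what the client uses to build request paths (see buildURL earlier in this diff). A tiny sketch of calling it directly; the template and values are illustrative, and the expected output matches the escaping behaviour exercised by the URL tests above.

    package main

    import (
        "fmt"
        "log"

        "github.com/olivere/elastic/uritemplates"
    )

    func main() {
        // Expand a URI template; reserved characters in the values are
        // percent-escaped, so the comma-joined lists stay single path segments.
        path, err := uritemplates.Expand("/{index}/{type}/_update_by_query", map[string]string{
            "index": "index1,index2",
            "type":  "type1,type2",
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(path) // /index1%2Cindex2/type1%2Ctype2/_update_by_query
    }
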
diff --git a/vendor/github.com/olivere/elastic/uritemplates/utils_test.go b/vendor/github.com/olivere/elastic/uritemplates/utils_test.go
deleted file mode 100644
index 633949b6f..000000000
--- a/vendor/github.com/olivere/elastic/uritemplates/utils_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package uritemplates
-
-import (
- "testing"
-)
-
-type ExpandTest struct {
- in string
- expansions map[string]string
- want string
-}
-
-var expandTests = []ExpandTest{
- // #0: no expansions
- {
- "http://www.golang.org/",
- map[string]string{},
- "http://www.golang.org/",
- },
- // #1: one expansion, no escaping
- {
- "http://www.golang.org/{bucket}/delete",
- map[string]string{
- "bucket": "red",
- },
- "http://www.golang.org/red/delete",
- },
- // #2: one expansion, with hex escapes
- {
- "http://www.golang.org/{bucket}/delete",
- map[string]string{
- "bucket": "red/blue",
- },
- "http://www.golang.org/red%2Fblue/delete",
- },
- // #3: one expansion, with space
- {
- "http://www.golang.org/{bucket}/delete",
- map[string]string{
- "bucket": "red or blue",
- },
- "http://www.golang.org/red%20or%20blue/delete",
- },
- // #4: expansion not found
- {
- "http://www.golang.org/{object}/delete",
- map[string]string{
- "bucket": "red or blue",
- },
- "http://www.golang.org//delete",
- },
- // #5: multiple expansions
- {
- "http://www.golang.org/{one}/{two}/{three}/get",
- map[string]string{
- "one": "ONE",
- "two": "TWO",
- "three": "THREE",
- },
- "http://www.golang.org/ONE/TWO/THREE/get",
- },
- // #6: utf-8 characters
- {
- "http://www.golang.org/{bucket}/get",
- map[string]string{
- "bucket": "£100",
- },
- "http://www.golang.org/%C2%A3100/get",
- },
- // #7: punctuations
- {
- "http://www.golang.org/{bucket}/get",
- map[string]string{
- "bucket": `/\@:,.*~`,
- },
- "http://www.golang.org/%2F%5C%40%3A%2C.%2A~/get",
- },
- // #8: mis-matched brackets
- {
- "http://www.golang.org/{bucket/get",
- map[string]string{
- "bucket": "red",
- },
- "",
- },
- // #9: "+" prefix for suppressing escape
- // See also: http://tools.ietf.org/html/rfc6570#section-3.2.3
- {
- "http://www.golang.org/{+topic}",
- map[string]string{
- "topic": "/topics/myproject/mytopic",
- },
- // The double slashes here look weird, but it's intentional
- "http://www.golang.org//topics/myproject/mytopic",
- },
-}
-
-func TestExpand(t *testing.T) {
- for i, test := range expandTests {
- got, _ := Expand(test.in, test.expansions)
- if got != test.want {
- t.Errorf("got %q expected %q in test %d", got, test.want, i)
- }
- }
-}