Shingle token filter

Add shingles, or word n-grams, to a token stream by concatenating adjacent tokens. By default, the shingle token filter outputs two-word shingles and unigrams.

For example, many tokenizers convert the lazy dog to [ the, lazy, dog ]. You can use the shingle filter to add two-word shingles to this stream: [ the, the lazy, lazy, lazy dog, dog ].

Shingles are often used to help speed up phrase queries, such as match_phrase. Rather than creating shingles using the shingle filter, we recommend you use the index-phrases mapping parameter on the appropriate text field instead.

This filter uses Lucene’s ShingleFilter.

Example

The following analyze API request uses the shingle filter to add two-word shingles to the token stream for quick brown fox jumps:

  1. resp = client.indices.analyze(
  2. tokenizer="whitespace",
  3. filter=[
  4. "shingle"
  5. ],
  6. text="quick brown fox jumps",
  7. )
  8. print(resp)
  1. response = client.indices.analyze(
  2. body: {
  3. tokenizer: 'whitespace',
  4. filter: [
  5. 'shingle'
  6. ],
  7. text: 'quick brown fox jumps'
  8. }
  9. )
  10. puts response
  1. const response = await client.indices.analyze({
  2. tokenizer: "whitespace",
  3. filter: ["shingle"],
  4. text: "quick brown fox jumps",
  5. });
  6. console.log(response);
  1. GET /_analyze
  2. {
  3. "tokenizer": "whitespace",
  4. "filter": [ "shingle" ],
  5. "text": "quick brown fox jumps"
  6. }

The filter produces the following tokens:

  1. [ quick, quick brown, brown, brown fox, fox, fox jumps, jumps ]

To produce shingles of 2-3 words, add the following arguments to the analyze API request:

  • min_shingle_size: 2
  • max_shingle_size: 3
  1. resp = client.indices.analyze(
  2. tokenizer="whitespace",
  3. filter=[
  4. {
  5. "type": "shingle",
  6. "min_shingle_size": 2,
  7. "max_shingle_size": 3
  8. }
  9. ],
  10. text="quick brown fox jumps",
  11. )
  12. print(resp)
  1. response = client.indices.analyze(
  2. body: {
  3. tokenizer: 'whitespace',
  4. filter: [
  5. {
  6. type: 'shingle',
  7. min_shingle_size: 2,
  8. max_shingle_size: 3
  9. }
  10. ],
  11. text: 'quick brown fox jumps'
  12. }
  13. )
  14. puts response
  1. const response = await client.indices.analyze({
  2. tokenizer: "whitespace",
  3. filter: [
  4. {
  5. type: "shingle",
  6. min_shingle_size: 2,
  7. max_shingle_size: 3,
  8. },
  9. ],
  10. text: "quick brown fox jumps",
  11. });
  12. console.log(response);
  1. GET /_analyze
  2. {
  3. "tokenizer": "whitespace",
  4. "filter": [
  5. {
  6. "type": "shingle",
  7. "min_shingle_size": 2,
  8. "max_shingle_size": 3
  9. }
  10. ],
  11. "text": "quick brown fox jumps"
  12. }

The filter produces the following tokens:

  1. [ quick, quick brown, quick brown fox, brown, brown fox, brown fox jumps, fox, fox jumps, jumps ]

To only include shingles in the output, add an output_unigrams argument of false to the request.

  1. resp = client.indices.analyze(
  2. tokenizer="whitespace",
  3. filter=[
  4. {
  5. "type": "shingle",
  6. "min_shingle_size": 2,
  7. "max_shingle_size": 3,
  8. "output_unigrams": False
  9. }
  10. ],
  11. text="quick brown fox jumps",
  12. )
  13. print(resp)
  1. response = client.indices.analyze(
  2. body: {
  3. tokenizer: 'whitespace',
  4. filter: [
  5. {
  6. type: 'shingle',
  7. min_shingle_size: 2,
  8. max_shingle_size: 3,
  9. output_unigrams: false
  10. }
  11. ],
  12. text: 'quick brown fox jumps'
  13. }
  14. )
  15. puts response
  1. const response = await client.indices.analyze({
  2. tokenizer: "whitespace",
  3. filter: [
  4. {
  5. type: "shingle",
  6. min_shingle_size: 2,
  7. max_shingle_size: 3,
  8. output_unigrams: false,
  9. },
  10. ],
  11. text: "quick brown fox jumps",
  12. });
  13. console.log(response);
  1. GET /_analyze
  2. {
  3. "tokenizer": "whitespace",
  4. "filter": [
  5. {
  6. "type": "shingle",
  7. "min_shingle_size": 2,
  8. "max_shingle_size": 3,
  9. "output_unigrams": false
  10. }
  11. ],
  12. "text": "quick brown fox jumps"
  13. }

The filter produces the following tokens:

  1. [ quick brown, quick brown fox, brown fox, brown fox jumps, fox jumps ]

Add to an analyzer

The following create index API request uses the shingle filter to configure a new custom analyzer.

  1. resp = client.indices.create(
  2. index="my-index-000001",
  3. settings={
  4. "analysis": {
  5. "analyzer": {
  6. "standard_shingle": {
  7. "tokenizer": "standard",
  8. "filter": [
  9. "shingle"
  10. ]
  11. }
  12. }
  13. }
  14. },
  15. )
  16. print(resp)
  1. response = client.indices.create(
  2. index: 'my-index-000001',
  3. body: {
  4. settings: {
  5. analysis: {
  6. analyzer: {
  7. standard_shingle: {
  8. tokenizer: 'standard',
  9. filter: [
  10. 'shingle'
  11. ]
  12. }
  13. }
  14. }
  15. }
  16. }
  17. )
  18. puts response
  1. const response = await client.indices.create({
  2. index: "my-index-000001",
  3. settings: {
  4. analysis: {
  5. analyzer: {
  6. standard_shingle: {
  7. tokenizer: "standard",
  8. filter: ["shingle"],
  9. },
  10. },
  11. },
  12. },
  13. });
  14. console.log(response);
  1. PUT /my-index-000001
  2. {
  3. "settings": {
  4. "analysis": {
  5. "analyzer": {
  6. "standard_shingle": {
  7. "tokenizer": "standard",
  8. "filter": [ "shingle" ]
  9. }
  10. }
  11. }
  12. }
  13. }

Configurable parameters

max_shingle_size

(Optional, integer) Maximum number of tokens to concatenate when creating shingles. Defaults to 2.

This value cannot be lower than the min_shingle_size argument, which defaults to 2. The difference between this value and the min_shingle_size argument cannot exceed the index.max_shingle_diff index-level setting, which defaults to 3.

min_shingle_size

(Optional, integer) Minimum number of tokens to concatenate when creating shingles. Defaults to 2.

This value cannot exceed the max_shingle_size argument, which defaults to 2. The difference between the max_shingle_size argument and this value cannot exceed the index.max_shingle_diff index-level setting, which defaults to 3.

output_unigrams

(Optional, Boolean) If true, the output includes the original input tokens. If false, the output only includes shingles; the original input tokens are removed. Defaults to true.

output_unigrams_if_no_shingles

(Optional, Boolean) If true, the output includes the original input tokens only if no shingles are produced; if shingles are produced, the output only includes shingles. Defaults to false.

If both this and the output_unigrams parameter are true, only the output_unigrams argument is used.

token_separator

(Optional, string) Separator used to concatenate adjacent tokens to form a shingle. Defaults to a space (" ").

filler_token

(Optional, string) String used in shingles as a replacement for empty positions that do not contain a token. This filler token is only used in shingles, not original unigrams. Defaults to an underscore (_).

Some token filters, such as the stop filter, create empty positions when removing stop words with a position increment greater than one.

Example

In the following analyze API request, the stop filter removes the stop word a from fox jumps a lazy dog, creating an empty position. The subsequent shingle filter replaces this empty position with a plus sign (+) in shingles.

  1. resp = client.indices.analyze(
  2. tokenizer="whitespace",
  3. filter=[
  4. {
  5. "type": "stop",
  6. "stopwords": [
  7. "a"
  8. ]
  9. },
  10. {
  11. "type": "shingle",
  12. "filler_token": "+"
  13. }
  14. ],
  15. text="fox jumps a lazy dog",
  16. )
  17. print(resp)
  1. response = client.indices.analyze(
  2. body: {
  3. tokenizer: 'whitespace',
  4. filter: [
  5. {
  6. type: 'stop',
  7. stopwords: [
  8. 'a'
  9. ]
  10. },
  11. {
  12. type: 'shingle',
  13. filler_token: '+'
  14. }
  15. ],
  16. text: 'fox jumps a lazy dog'
  17. }
  18. )
  19. puts response
  1. const response = await client.indices.analyze({
  2. tokenizer: "whitespace",
  3. filter: [
  4. {
  5. type: "stop",
  6. stopwords: ["a"],
  7. },
  8. {
  9. type: "shingle",
  10. filler_token: "+",
  11. },
  12. ],
  13. text: "fox jumps a lazy dog",
  14. });
  15. console.log(response);
  1. GET /_analyze
  2. {
  3. "tokenizer": "whitespace",
  4. "filter": [
  5. {
  6. "type": "stop",
  7. "stopwords": [ "a" ]
  8. },
  9. {
  10. "type": "shingle",
  11. "filler_token": "+"
  12. }
  13. ],
  14. "text": "fox jumps a lazy dog"
  15. }

The filter produces the following tokens:

  1. [ fox, fox jumps, jumps, jumps +, + lazy, lazy, lazy dog, dog ]

Customize

To customize the shingle filter, duplicate it to create the basis for a new custom token filter. You can modify the filter using its configurable parameters.

For example, the following create index API request uses a custom shingle filter, my_shingle_filter, to configure a new custom analyzer.

The my_shingle_filter filter uses a min_shingle_size of 2 and a max_shingle_size of 5, meaning it produces shingles of 2-5 words. The filter also includes an output_unigrams argument of false, meaning that only shingles are included in the output.

  1. resp = client.indices.create(
  2. index="my-index-000001",
  3. settings={
  4. "analysis": {
  5. "analyzer": {
  6. "en": {
  7. "tokenizer": "standard",
  8. "filter": [
  9. "my_shingle_filter"
  10. ]
  11. }
  12. },
  13. "filter": {
  14. "my_shingle_filter": {
  15. "type": "shingle",
  16. "min_shingle_size": 2,
  17. "max_shingle_size": 5,
  18. "output_unigrams": False
  19. }
  20. }
  21. }
  22. },
  23. )
  24. print(resp)
  1. response = client.indices.create(
  2. index: 'my-index-000001',
  3. body: {
  4. settings: {
  5. analysis: {
  6. analyzer: {
  7. en: {
  8. tokenizer: 'standard',
  9. filter: [
  10. 'my_shingle_filter'
  11. ]
  12. }
  13. },
  14. filter: {
  15. my_shingle_filter: {
  16. type: 'shingle',
  17. min_shingle_size: 2,
  18. max_shingle_size: 5,
  19. output_unigrams: false
  20. }
  21. }
  22. }
  23. }
  24. }
  25. )
  26. puts response
  1. const response = await client.indices.create({
  2. index: "my-index-000001",
  3. settings: {
  4. analysis: {
  5. analyzer: {
  6. en: {
  7. tokenizer: "standard",
  8. filter: ["my_shingle_filter"],
  9. },
  10. },
  11. filter: {
  12. my_shingle_filter: {
  13. type: "shingle",
  14. min_shingle_size: 2,
  15. max_shingle_size: 5,
  16. output_unigrams: false,
  17. },
  18. },
  19. },
  20. },
  21. });
  22. console.log(response);
  1. PUT /my-index-000001
  2. {
  3. "settings": {
  4. "analysis": {
  5. "analyzer": {
  6. "en": {
  7. "tokenizer": "standard",
  8. "filter": [ "my_shingle_filter" ]
  9. }
  10. },
  11. "filter": {
  12. "my_shingle_filter": {
  13. "type": "shingle",
  14. "min_shingle_size": 2,
  15. "max_shingle_size": 5,
  16. "output_unigrams": false
  17. }
  18. }
  19. }
  20. }
  21. }