
Commit f6cbdf5

Use global namespaces instead of imports.

1 parent c03c7eb · commit f6cbdf5

17 files changed: +1805 −1813 lines
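For orientation before the per-file diffs: the commit replaces ES module imports with a single global namespace, LokiDB.FullTextSearch. A minimal consumer-side sketch, assuming all namespace files are compiled into one global scope (the sample call is illustrative, not part of the commit):

// Before: symbols were imported per module, e.g.
// import {analyze, StandardAnalyzer} from "./analyzer/analyzer";

// After: symbols are reached through the global namespace.
const tokens: string[] = LokiDB.FullTextSearch.analyze(
  new LokiDB.FullTextSearch.StandardAnalyzer(), "Hello World");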
Lines changed: 41 additions & 43 deletions
@@ -1,53 +1,51 @@
-import {CharacterFilter} from "./character_filter";
-import {Tokenizer, whitespaceTokenizer} from "./tokenizer";
-import {lowercaseTokenFilter, TokenFilter} from "./token_filter";
-
-/**
- * A analyzer converts a string into tokens which are added to the inverted index for searching.
- */
-export interface Analyzer {
-  /**
-   * The character filters of the analyzer.
-   */
-  char_filter?: CharacterFilter[];
+namespace LokiDB.FullTextSearch {
   /**
-   * The tokenizer of the analyzer.
+   * A analyzer converts a string into tokens which are added to the inverted index for searching.
    */
-  tokenizer: Tokenizer;
+  export interface Analyzer {
+    /**
+     * The character filters of the analyzer.
+     */
+    char_filter?: CharacterFilter[];
+    /**
+     * The tokenizer of the analyzer.
+     */
+    tokenizer: Tokenizer;
+    /**
+     * The token filters of the analyzer.
+     */
+    token_filter?: TokenFilter[];
+  }
+
   /**
-   * The token filters of the analyzer.
+   * Analyzes a given string.
+   * @param {Analyzer} analyzer - the analyzer
+   * @param {string} str - the string
+   * @returns {string[]} - the tokens
    */
-  token_filter?: TokenFilter[];
-}
-
-/**
- * Analyzes a given string.
- * @param {Analyzer} analyzer - the analyzer
- * @param {string} str - the string
- * @returns {string[]} - the tokens
- */
-export function analyze(analyzer: Analyzer, str: string): string[] {
-  if (analyzer.char_filter) {
-    for (let j = 0; j < analyzer.char_filter.length; j++) {
-      str = analyzer.char_filter[j](str);
+  export function analyze(analyzer: Analyzer, str: string): string[] {
+    if (analyzer.char_filter) {
+      for (let j = 0; j < analyzer.char_filter.length; j++) {
+        str = analyzer.char_filter[j](str);
+      }
     }
-  }
-  const tokens = analyzer.tokenizer(str);
-  if (analyzer.token_filter) {
-    for (let i = 0; i < tokens.length; i++) {
-      for (let k = 0; k < analyzer.token_filter.length; k++) {
-        tokens[i] = analyzer.token_filter[k](tokens[i], i, tokens);
+    const tokens = analyzer.tokenizer(str);
+    if (analyzer.token_filter) {
+      for (let i = 0; i < tokens.length; i++) {
+        for (let k = 0; k < analyzer.token_filter.length; k++) {
+          tokens[i] = analyzer.token_filter[k](tokens[i], i, tokens);
+        }
       }
     }
+    // Remove empty tokens.
+    return tokens.filter((token) => token);
   }
-  // Remove empty tokens.
-  return tokens.filter((token) => token);
-}
 
-/**
- * An analyzer with the whitespace tokenizer and the lowercase token filter.
- */
-export class StandardAnalyzer implements Analyzer {
-  tokenizer = whitespaceTokenizer;
-  token_filter = [lowercaseTokenFilter];
+  /**
+   * An analyzer with the whitespace tokenizer and the lowercase token filter.
+   */
+  export class StandardAnalyzer implements Analyzer {
+    tokenizer = whitespaceTokenizer;
+    token_filter = [lowercaseTokenFilter];
+  }
 }
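To illustrate the pipeline that analyze() implements — character filters over the raw string, then the tokenizer, then every token filter over each token, then empty tokens dropped — here is a minimal sketch against the interfaces above; the punctuation filter and sample input are hypothetical, not part of the commit:

namespace LokiDB.FullTextSearch {
  // Hypothetical character filter: blank out punctuation before tokenizing.
  const punctuationFilter: CharacterFilter = (value) => value.replace(/[.,!?]/g, " ");

  const myAnalyzer: Analyzer = {
    char_filter: [punctuationFilter],
    tokenizer: whitespaceTokenizer,
    token_filter: [lowercaseTokenFilter],
  };

  // "Hello, World!" -> "Hello  World " -> ["Hello", "World", ""] -> ["hello", "world"]
  const tokens = analyze(myAnalyzer, "Hello, World!");
}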
Lines changed: 6 additions & 5 deletions
@@ -1,6 +1,7 @@
-/**
- * A character filter is used to preprocess a string before it is passed to a tokenizer.
- */
-export type CharacterFilter = (value: string) => string;
-
+namespace LokiDB.FullTextSearch {
+  /**
+   * A character filter is used to preprocess a string before it is passed to a tokenizer.
+   */
+  export type CharacterFilter = (value: string) => string;
+}
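A character filter is just a string-to-string function. As a sketch, a hypothetical filter (not part of this commit) that strips HTML tags so markup never reaches the tokenizer:

namespace LokiDB.FullTextSearch {
  // Hypothetical: replace tags with spaces so "foo<br>bar" still yields two tokens.
  export const htmlStripCharacterFilter: CharacterFilter = (value) =>
    value.replace(/<[^>]*>/g, " ");
}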

Lines changed: 21 additions & 19 deletions
@@ -1,22 +1,24 @@
-/**
- * A token filter takes tokens from a tokenizer and modify, delete or add tokens.
- */
-export type TokenFilter = (value: string, index: number, array: string[]) => string;
+namespace LokiDB.FullTextSearch {
+  /**
+   * A token filter takes tokens from a tokenizer and modify, delete or add tokens.
+   */
+  export type TokenFilter = (value: string, index: number, array: string[]) => string;
 
-/**
- * Converts a token to lowercase.
- * @param {string} token - the token
- * @returns {string} - the lowercased token
- */
-export function lowercaseTokenFilter(token: string): string {
-  return token.toLowerCase();
-}
+  /**
+   * Converts a token to lowercase.
+   * @param {string} token - the token
+   * @returns {string} - the lowercased token
+   */
+  export function lowercaseTokenFilter(token: string): string {
+    return token.toLowerCase();
+  }
 
-/**
- * Converts a token to uppercase.
- * @param {string} token - the token
- * @returns {string} - the uppercased token
- */
-export function uppercaseTokenFilter(token: string): string {
-  return token.toUpperCase();
+  /**
+   * Converts a token to uppercase.
+   * @param {string} token - the token
+   * @returns {string} - the uppercased token
+   */
+  export function uppercaseTokenFilter(token: string): string {
+    return token.toUpperCase();
+  }
 }
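Since analyze() drops empty tokens after filtering, a token filter can delete a token by returning an empty string. A minimal sketch, assuming a hypothetical stop-word list (not part of this commit):

namespace LokiDB.FullTextSearch {
  const STOP_WORDS = new Set(["a", "an", "the", "of"]);

  // Hypothetical: returning "" marks the token for removal by analyze().
  export const stopWordTokenFilter: TokenFilter = (value) =>
    STOP_WORDS.has(value) ? "" : value;
}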

packages/full-text-search/src/analyzer/tokenizer.ts

Lines changed: 13 additions & 12 deletions
@@ -1,14 +1,15 @@
-/**
- * A tokenizer splits a string into individual tokens.
- */
-export type Tokenizer = (value: string) => string[];
+namespace LokiDB.FullTextSearch {
+  /**
+   * A tokenizer splits a string into individual tokens.
+   */
+  export type Tokenizer = (value: string) => string[];
 
-/**
- * Splits a string at whitespace characters into tokens.
- * @param {string} value - the string
- * @returns {string[]} - the tokens
- */
-export function whitespaceTokenizer(value: string): string[] {
-  return value.split(/[\s]+/);
+  /**
+   * Splits a string at whitespace characters into tokens.
+   * @param {string} value - the string
+   * @returns {string[]} - the tokens
+   */
+  export function whitespaceTokenizer(value: string): string[] {
+    return value.split(/[\s]+/);
+  }
 }
-
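Any function with the shape (value: string) => string[] can serve as a tokenizer. As a sketch, a hypothetical alternative (not part of this commit) that splits on every non-alphanumeric run instead of only whitespace:

namespace LokiDB.FullTextSearch {
  // Hypothetical: "full-text search" -> ["full", "text", "search"].
  export const alphaNumericTokenizer: Tokenizer = (value) =>
    value.split(/[^0-9a-zA-Z]+/);
}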
Lines changed: 80 additions & 86 deletions
@@ -1,107 +1,101 @@
-import {InvertedIndex} from "./inverted_index";
-import {IndexSearcher} from "./index_searcher";
-import {Dict} from "../../common/types";
-import {PLUGINS} from "../../common/plugin";
-import {Query} from "./query_types";
-import {Scorer} from "./scorer";
-import {Analyzer} from "./analyzer/analyzer";
-import {Serialization} from "../../loki/src/serialization/migration";
+namespace LokiDB.FullTextSearch {
 
-export class FullTextSearch {
-  /// The id field of each document.
-  private _id: string;
-  /// Set of ids of all indexed documents.
-  private _docs: Set<InvertedIndex.DocumentIndex>;
-  private _idxSearcher: IndexSearcher;
-  private _invIdxs: Dict<InvertedIndex> = {};
+  export class FullTextSearch {
+    /// The id field of each document.
+    private _id: string;
+    /// Set of ids of all indexed documents.
+    private _docs: Set<DocumentIndex>;
+    private _idxSearcher: IndexSearcher;
+    private _invIdxs: Dict<InvertedIndex> = {};
 
-  /**
-   * Registers the full-text search as plugin.
-   */
-  public static register(): void {
-    PLUGINS["FullTextSearch"] = FullTextSearch;
-  }
+    /**
+     * Registers the full-text search as plugin.
+     */
+    public static register(): void {
+      PLUGINS["FullTextSearch"] = FullTextSearch;
+    }
 
-  /**
-   * Initialize the full-text search for the given fields.
-   * @param {object[]} fieldOptions - the field options
-   * @param {string} fieldOptions.field - the name of the property field
-   * @param {boolean=true} fieldOptions.store - flag to indicate if the full-text search should be stored on serialization or
-   *  rebuild on deserialization
-   * @param {boolean=true} fieldOptions.optimizeChanges - flag to optimize updating and deleting of documents
-   *  (requires more memory but performs faster)
-   * @param {Analyzer} fieldOptions.analyzer - an analyzer for the field
-   * @param {string} [id] - the property name of the document index
-   */
-  constructor(fieldOptions: FullTextSearch.FieldOptions[] = [], id?: string) {
-    // Create an inverted index for each field.
-    for (let i = 0; i < fieldOptions.length; i++) {
-      let fieldOption = fieldOptions[i];
-      this._invIdxs[fieldOption.field] = new InvertedIndex(fieldOption);
+    /**
+     * Initialize the full-text search for the given fields.
+     * @param {object[]} fieldOptions - the field options
+     * @param {string} fieldOptions.field - the name of the property field
+     * @param {boolean=true} fieldOptions.store - flag to indicate if the full-text search should be stored on serialization or
+     *  rebuild on deserialization
+     * @param {boolean=true} fieldOptions.optimizeChanges - flag to optimize updating and deleting of documents
+     *  (requires more memory but performs faster)
+     * @param {Analyzer} fieldOptions.analyzer - an analyzer for the field
+     * @param {string} [id] - the property name of the document index
+     */
+    constructor(fieldOptions: FullTextSearch.FieldOptions[] = [], id?: string) {
+      // Create an inverted index for each field.
+      for (let i = 0; i < fieldOptions.length; i++) {
+        let fieldOption = fieldOptions[i];
+        this._invIdxs[fieldOption.field] = new InvertedIndex(fieldOption);
+      }
+      this._id = id;
+      this._docs = new Set();
+      this._idxSearcher = new IndexSearcher(this._invIdxs, this._docs);
     }
-    this._id = id;
-    this._docs = new Set();
-    this._idxSearcher = new IndexSearcher(this._invIdxs, this._docs);
-  }
 
-  public addDocument(doc: object, id: InvertedIndex.DocumentIndex = doc[this._id]): void {
-    let fieldNames = Object.keys(this._invIdxs);
-    for (let i = 0, fieldName; i < fieldNames.length, fieldName = fieldNames[i]; i++) {
-      if (doc[fieldName] !== undefined) {
-        this._invIdxs[fieldName].insert(doc[fieldName], id);
+    public addDocument(doc: object, id: DocumentIndex = doc[this._id]): void {
+      let fieldNames = Object.keys(this._invIdxs);
+      for (let i = 0, fieldName; i < fieldNames.length, fieldName = fieldNames[i]; i++) {
+        if (doc[fieldName] !== undefined) {
+          this._invIdxs[fieldName].insert(doc[fieldName], id);
+        }
       }
+      this._docs.add(id);
+      this._idxSearcher.setDirty();
     }
-    this._docs.add(id);
-    this._idxSearcher.setDirty();
-  }
 
-  public removeDocument(doc: object, id: InvertedIndex.DocumentIndex = doc[this._id]): void {
-    let fieldNames = Object.keys(this._invIdxs);
-    for (let i = 0; i < fieldNames.length; i++) {
-      this._invIdxs[fieldNames[i]].remove(id);
+    public removeDocument(doc: object, id: DocumentIndex = doc[this._id]): void {
+      let fieldNames = Object.keys(this._invIdxs);
+      for (let i = 0; i < fieldNames.length; i++) {
+        this._invIdxs[fieldNames[i]].remove(id);
+      }
+      this._docs.delete(id);
+      this._idxSearcher.setDirty();
    }
-    this._docs.delete(id);
-    this._idxSearcher.setDirty();
-  }
 
-  public updateDocument(doc: object, id: InvertedIndex.DocumentIndex = doc[this._id]): void {
-    this.removeDocument(doc, id);
-    this.addDocument(doc, id);
-  }
+    public updateDocument(doc: object, id: DocumentIndex = doc[this._id]): void {
+      this.removeDocument(doc, id);
+      this.addDocument(doc, id);
+    }
 
-  public clear(): void {
-    for (let id of this._docs) {
-      this.removeDocument(null, id);
+    public clear(): void {
+      for (let id of this._docs) {
+        this.removeDocument(null, id);
+      }
     }
-  }
 
-  public search(query: Query): Scorer.ScoreResults {
-    return this._idxSearcher.search(query);
-  }
+    public search(query: Query): Scorer.ScoreResults {
+      return this._idxSearcher.search(query);
+    }
 
-  public toJSON(): Serialization.FullTextSearch {
-    let serialized = {id: this._id, ii: {}};
-    let fieldNames = Object.keys(this._invIdxs);
-    for (let i = 0; i < fieldNames.length; i++) {
-      const fieldName = fieldNames[i];
-      serialized.ii[fieldName] = this._invIdxs[fieldName].toJSON();
+    public toJSON(): Serialization.FullTextSearch {
+      let serialized = {id: this._id, ii: {}};
+      let fieldNames = Object.keys(this._invIdxs);
+      for (let i = 0; i < fieldNames.length; i++) {
+        const fieldName = fieldNames[i];
+        serialized.ii[fieldName] = this._invIdxs[fieldName].toJSON();
+      }
+      return serialized;
    }
-    return serialized;
-  }
 
-  public static fromJSONObject(serialized: Serialization.FullTextSearch, analyzers: Dict<Analyzer> = {}): FullTextSearch {
-    let fts = new FullTextSearch([], serialized.id);
-    let fieldNames = Object.keys(serialized.ii);
-    for (let i = 0; i < fieldNames.length; i++) {
-      const fieldName = fieldNames[i];
-      fts._invIdxs[fieldName] = InvertedIndex.fromJSONObject(serialized.ii[fieldName], analyzers[fieldName]);
+    public static fromJSONObject(serialized: Serialization.FullTextSearch, analyzers: Dict<Analyzer> = {}): FullTextSearch {
+      let fts = new FullTextSearch([], serialized.id);
+      let fieldNames = Object.keys(serialized.ii);
+      for (let i = 0; i < fieldNames.length; i++) {
+        const fieldName = fieldNames[i];
+        fts._invIdxs[fieldName] = InvertedIndex.fromJSONObject(serialized.ii[fieldName], analyzers[fieldName]);
+      }
+      return fts;
     }
-    return fts;
   }
-}
 
-export namespace FullTextSearch {
-  export interface FieldOptions extends InvertedIndex.FieldOptions {
-    field: string;
+  export namespace FullTextSearch {
+    export interface FieldOptions extends InvertedIndex.FieldOptions {
+      field: string;
+    }
   }
 }
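Putting the class together, a minimal usage sketch under assumptions: InvertedIndex, IndexSearcher, Scorer and the Query type live in sibling files not shown in this commit, and the query shape below is assumed rather than confirmed by this diff:

// Register the plugin, then index two documents on their "body" field,
// keyed by their "id" property.
LokiDB.FullTextSearch.FullTextSearch.register();

const fts = new LokiDB.FullTextSearch.FullTextSearch([{field: "body"}], "id");
fts.addDocument({id: 1, body: "The quick brown fox"});
fts.addDocument({id: 2, body: "jumps over the lazy dog"});

// Assumed query shape; the real Query type is defined in query_types.
const results = fts.search({query: {type: "match", field: "body", value: "fox"}});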
