diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
index 83c341e541..5cfc5b420d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
@@ -28,13 +28,13 @@ namespace Lucene.Net.Analysis.Br
*/
///
- /// for Brazilian Portuguese language.
+ /// for Brazilian Portuguese language.
///
/// Supports an external list of stopwords (words that
/// will not be indexed at all) and an external list of exclusions (words that will
/// not be stemmed, but indexed).
///
- ///
+ ///
/// NOTE : This class uses the same
/// dependent settings as .
///
@@ -77,7 +77,7 @@ private static CharArraySet LoadDefaultStopSet() // LUCENENET: Avoid static cons
///
/// Contains words that should be indexed but not stemmed.
///
- private CharArraySet excltable = CharArraySet.Empty;
+ private readonly CharArraySet excltable = CharArraySet.Empty; // LUCENENET: marked readonly
///
/// Builds an analyzer with the default stop words ( ).
@@ -135,4 +135,4 @@ protected internal override TokenStreamComponents CreateComponents(string fieldN
return new TokenStreamComponents(source, new BrazilianStemFilter(result));
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/HTMLStripCharFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/HTMLStripCharFilter.cs
index d18149ee2e..899c79588d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/HTMLStripCharFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/HTMLStripCharFilter.cs
@@ -30792,19 +30792,19 @@ CharArrayDictionary result
private static readonly char STYLE_REPLACEMENT = '\n';
private static readonly char REPLACEMENT_CHARACTER = '\uFFFD';
- private CharArraySet escapedTags = null;
+ private readonly CharArraySet escapedTags = null; // LUCENENET: marked readonly
private int inputStart;
private int cumulativeDiff;
- private bool escapeBR = false;
- private bool escapeSCRIPT = false;
- private bool escapeSTYLE = false;
+ private readonly bool escapeBR = false; // LUCENENET: marked readonly
+ private readonly bool escapeSCRIPT = false; // LUCENENET: marked readonly
+ private readonly bool escapeSTYLE = false; // LUCENENET: marked readonly
private int restoreState;
private int previousRestoreState;
private int outputCharCount;
private int eofReturnValue;
- private TextSegment inputSegment = new TextSegment(INITIAL_INPUT_SEGMENT_SIZE);
+ private readonly TextSegment inputSegment = new TextSegment(INITIAL_INPUT_SEGMENT_SIZE); // LUCENENET: marked readonly
private TextSegment outputSegment;
- private TextSegment entitySegment = new TextSegment(2);
+ private readonly TextSegment entitySegment = new TextSegment(2); // LUCENENET: marked readonly
///
/// Creates a new HTMLStripCharFilter over the provided TextReader.
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs
index 739c30b42a..1e2284f715 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs
@@ -35,13 +35,13 @@ namespace Lucene.Net.Analysis.Cjk
///
public sealed class CJKWidthFilter : TokenFilter
{
- private ICharTermAttribute termAtt;
+ private readonly ICharTermAttribute termAtt; // LUCENENET: marked readonly
///
- /// halfwidth kana mappings: 0xFF65-0xFF9D
+ /// halfwidth kana mappings: 0xFF65-0xFF9D
///
/// note: 0xFF9C and 0xFF9D are only mapped to 0x3099 and 0x309A
- /// as a fallback when they cannot properly combine with a preceding
+ /// as a fallback when they cannot properly combine with a preceding
/// character into a composed form.
///
private static readonly char[] KANA_NORM = new char[] {
@@ -124,4 +124,4 @@ private static bool Combine(char[] text, int pos, char ch)
return false;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs
index 36d0a76ea9..ce44bc03d4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Analysis.Cn
*/
///
- /// A with a stop word table.
+ /// A with a stop word table.
///
/// Numeric tokens are removed.
/// English tokens must be larger than 1 character.
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis.Cn
///
///
/// @deprecated (3.1) Use instead, which has the same functionality.
- /// This filter will be removed in Lucene 5.0
+ /// This filter will be removed in Lucene 5.0
[Obsolete("(3.1) Use StopFilter instead, which has the same functionality.")]
public sealed class ChineseFilter : TokenFilter
{
@@ -52,9 +52,9 @@ public sealed class ChineseFilter : TokenFilter
"they", "this", "to", "was", "will", "with"
};
- private CharArraySet stopTable;
+ private readonly CharArraySet stopTable; // LUCENENET: marked readonly
- private ICharTermAttribute termAtt;
+ private readonly ICharTermAttribute termAtt; // LUCENENET: marked readonly
public ChineseFilter(TokenStream @in)
: base(@in)
@@ -97,4 +97,4 @@ public override bool IncrementToken()
return false;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/ByteVector.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/ByteVector.cs
index aa025163b6..82a0fef094 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/ByteVector.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/ByteVector.cs
@@ -11,9 +11,9 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
///
/// This class implements a simple byte vector with access to the underlying
/// array.
- /// This class has been taken from the Apache FOP project (http://xmlgraphics.apache.org/fop/). They have been slightly modified.
+ /// This class has been taken from the Apache FOP project (http://xmlgraphics.apache.org/fop/). They have been slightly modified.
///
public class ByteVector
{
@@ -33,7 +33,7 @@ public class ByteVector
///
private const int DEFAULT_BLOCK_SIZE = 2048;
- private int blockSize;
+ private readonly int blockSize; // LUCENENET: marked readonly
///
/// The encapsulated array
@@ -45,7 +45,7 @@ public class ByteVector
///
private int n;
- public ByteVector()
+ public ByteVector()
: this(DEFAULT_BLOCK_SIZE)
{
}
@@ -148,4 +148,4 @@ public virtual void TrimToSize()
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/CharVector.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/CharVector.cs
index 854e053bbe..0bac76692b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/CharVector.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/CharVector.cs
@@ -12,9 +12,9 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -25,8 +25,8 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
///
/// This class implements a simple char vector with access to the underlying
/// array.
- ///
- /// This class has been taken from the Apache FOP project (http://xmlgraphics.apache.org/fop/). They have been slightly modified.
+ ///
+ /// This class has been taken from the Apache FOP project (http://xmlgraphics.apache.org/fop/). They have been slightly modified.
///
public class CharVector // LUCENENET specific: Not implementing ICloneable per Microsoft's recommendation
{
@@ -35,7 +35,7 @@ public class CharVector // LUCENENET specific: Not implementing ICloneable per M
///
private const int DEFAULT_BLOCK_SIZE = 2048;
- private int blockSize;
+ private readonly int blockSize; // LUCENENET: marked readonly
///
/// The encapsulated array
@@ -47,7 +47,7 @@ public class CharVector // LUCENENET specific: Not implementing ICloneable per M
///
private int n;
- public CharVector()
+ public CharVector()
: this(DEFAULT_BLOCK_SIZE)
{
}
@@ -160,4 +160,4 @@ public virtual void TrimToSize()
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/PatternParser.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/PatternParser.cs
index 520e87b707..3f5b829797 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/PatternParser.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/PatternParser.cs
@@ -40,7 +40,7 @@ public class PatternParser
internal IPatternConsumer consumer;
- internal StringBuilder token;
+ internal readonly StringBuilder token; // LUCENENET: marked readonly
internal IList exception;
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs
index 1f439e2862..dd6dbe6161 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs
@@ -15,9 +15,9 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
///
/// Ternary Search Tree.
- ///
+ ///
///
/// A ternary search tree is a hybrid between a binary tree and a digital search
/// tree (trie). Keys are limited to strings. A data value of type char is stored
@@ -42,7 +42,7 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
/// trie. Performance is comparable with a hash table, sometimes it outperforms a
/// hash function (most of the time can determine a miss faster than a hash).
///
- ///
+ ///
///
/// The main purpose of this java port is to serve as a base for implementing
/// TeX's hyphenation algorithm (see The TeXBook, appendix H). Each language
@@ -56,15 +56,15 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
/// tests the english patterns took 7694 nodes and the german patterns 10055
/// nodes, so I think we are safe.
///
- ///
+ ///
///
/// All said, this is a map with strings as keys and char as value. Pretty
/// limited!. It can be extended to a general map by using the string
/// representation of an object and using the char value as an index to an array
/// that contains the object values.
///
- ///
- /// This class has been taken from the Apache FOP project (http://xmlgraphics.apache.org/fop/). They have been slightly modified.
+ ///
+ /// This class has been taken from the Apache FOP project (http://xmlgraphics.apache.org/fop/). They have been slightly modified.
///
public class TernaryTree // LUCENENET specific: Not implementing ICloneable per Microsoft's recommendation
@@ -475,7 +475,7 @@ public virtual void Balance()
/// tree is traversed to find the key substrings actually used. In addition,
/// duplicate substrings are removed using a map (implemented with a
/// TernaryTree!).
- ///
+ ///
///
public virtual void TrimToSize()
{
@@ -560,7 +560,7 @@ public class Enumerator : IEnumerator
private class Item // LUCENENET specific: Not implementing ICloneable per Microsoft's recommendation
{
- internal char parent;
+ internal readonly char parent; // LUCENENET: marked readonly
internal char child;
// LUCENENET: This constructor is unnecessary
@@ -814,4 +814,4 @@ public static void main(String[] args) {
*/
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
index e9849b7641..bbb0e0fec9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.En
/// You must specify the required
/// compatibility when creating :
///
- /// As of 3.6, U+2019 RIGHT SINGLE QUOTATION MARK and
+ /// As of 3.6, U+2019 RIGHT SINGLE QUOTATION MARK and
/// U+FF07 FULLWIDTH APOSTROPHE are also treated as
/// quotation marks.
///
@@ -36,16 +36,16 @@ namespace Lucene.Net.Analysis.En
public sealed class EnglishPossessiveFilter : TokenFilter
{
private readonly ICharTermAttribute termAtt;
- private LuceneVersion matchVersion;
+ private readonly LuceneVersion matchVersion; // LUCENENET: marked readonly
- /// @deprecated Use instead.
+ /// @deprecated Use instead.
[Obsolete(@"Use instead.")]
- public EnglishPossessiveFilter(TokenStream input)
+ public EnglishPossessiveFilter(TokenStream input)
: this(LuceneVersion.LUCENE_35, input)
{
}
- public EnglishPossessiveFilter(LuceneVersion version, TokenStream input)
+ public EnglishPossessiveFilter(LuceneVersion version, TokenStream input)
: base(input)
{
this.matchVersion = version;
@@ -73,4 +73,4 @@ public override bool IncrementToken()
return true;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/KStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/KStemmer.cs
index f4c395b687..c0cf949b6b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/KStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/KStemmer.cs
@@ -329,8 +329,8 @@ public class KStemmer
internal class DictEntry
{
- internal bool exception;
- internal string root;
+ internal readonly bool exception; // LUCENENET: marked readonly
+ internal readonly string root; // LUCENENET: marked readonly
internal DictEntry(string root, bool isException)
{
@@ -341,8 +341,8 @@ internal DictEntry(string root, bool isException)
private static readonly CharArrayDictionary dict_ht = InitializeDictHash();
- // caching off
- //
+ // caching off
+ //
// private int maxCacheSize; private CharArrayDictionary{String} cache =
// null; private static final String SAME = "SAME"; // use if stemmed form is
// the same
@@ -809,9 +809,9 @@ private void PastTense()
k = j + 1;
DictEntry entry = WordInDict();
- if (entry != null)
+ if (entry != null)
{
- if (!entry.exception)
+ if (!entry.exception)
{
// if it's in the dictionary and
// not an exception
@@ -1882,7 +1882,7 @@ internal virtual bool Stem(char[] term, int len)
//*
// caching off is normally faster if (cache is null) initializeStemHash();
- //
+ //
// // now check the cache, before we copy chars to "word" if (cache != null)
// { String val = cache.get(term, 0, len); if (val != null) { if (val !=
// SAME) { result = val; return true; } return false; } }
@@ -2026,7 +2026,7 @@ internal virtual bool Stem(char[] term, int len)
// if (entry is null) { if (!word.toString().equals(new String(term,0,len), StringComparison.Ordinal))
// { System.out.println("CASE:" + word.toString() + "," + new
// String(term,0,len));
- //
+ //
// } }
// **
@@ -2034,4 +2034,4 @@ internal virtual bool Stem(char[] term, int len)
return true;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
index 3055bbe2b6..2bcda79add 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
@@ -74,14 +74,14 @@ public class Dictionary
// all condition checks used by prefixes and suffixes. these are typically re-used across
// many affix stripping rules. so these are deduplicated, to save RAM.
- internal IList patterns = new JCG.List();
+ internal readonly IList patterns = new JCG.List(); // LUCENENET: marked readonly
// the entries in the .dic file, mapping to their set of flags.
// the fst output is the ordinal list for flagLookup
- internal FST words;
+ internal readonly FST words; // LUCENENET: marked readonly
// the list of unique flagsets (wordforms). theoretically huge, but practically
// small (e.g. for polish this is 756), otherwise humans wouldn't be able to deal with it either.
- internal BytesRefHash flagLookup = new BytesRefHash();
+ internal readonly BytesRefHash flagLookup = new BytesRefHash(); // LUCENENET: marked readonly
// the list of unique strip affixes.
internal char[] stripData;
@@ -111,7 +111,7 @@ public class Dictionary
// LUCENENET specific - changed from DirectoryInfo to string
private readonly string tempDir = OfflineSorter.DefaultTempDir; // TODO: make this configurable?
- internal bool ignoreCase;
+ internal readonly bool ignoreCase; // LUCENENET: marked readonly
internal bool complexPrefixes;
internal bool twoStageAffix; // if no affixes have continuation classes, no need to do 2-level affix stripping
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs
index c7452bc40a..f86cc9a2d3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.Lv
///
/// Only explicitly stems noun and adjective morphology
/// Stricter length/vowel checks for the resulting stems (verb etc suffix stripping is removed)
- /// Removes only the primary inflectional suffixes: case and number for nouns ;
+ /// Removes only the primary inflectional suffixes: case and number for nouns ;
/// case, number, gender, and definitiveness for adjectives.
/// Palatalization is only handled when a declension II,V,VI noun suffix is removed.
///
@@ -80,9 +80,9 @@ public virtual int Stem(char[] s, int len)
internal class Affix
{
- internal char[] affix; // suffix
- internal int vc; // vowel count of the suffix
- internal bool palatalizes; // true if we should fire palatalization rules.
+ internal readonly char[] affix; // suffix // LUCENENET: marked readonly
+ internal readonly int vc; // vowel count of the suffix // LUCENENET: marked readonly
+ internal readonly bool palatalizes; // true if we should fire palatalization rules. // LUCENENET: marked readonly
internal Affix(string affix, int vc, bool palatalizes)
{
@@ -103,7 +103,7 @@ internal Affix(string affix, int vc, bool palatalizes)
///
private static int Unpalatalize(char[] s, int len) // LUCENENET: CA1822: Mark members as static
{
- // we check the character removed: if its -u then
+ // we check the character removed: if its -u then
// its 2,5, or 6 gen pl., and these two can only apply then.
if (s[len] == 'u')
{
@@ -212,4 +212,4 @@ private static int NumVowels(char[] s, int len) // LUCENENET: CA1822: Mark membe
return n;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
index 91e592c5eb..71e8d5f1d0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
@@ -42,17 +42,17 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// assumed to be correct.
/// "culture" - the culture to use to apply the capitalization rules. If not supplied or the string
/// "invariant" is supplied, the invariant culture is used.
- ///
+ ///
///
/// <fieldType name="text_cptlztn" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.CapitalizationFilterFactory" onlyFirstWord="true"
/// keep="java solr lucene" keepIgnoreCase="false"
- /// okPrefix="McK McD McA"/>
+ /// okPrefix="McK McD McA"/>
/// </analyzer>
/// </fieldType>
- ///
+ ///
/// @since solr 1.3
///
public class CapitalizationFilterFactory : TokenFilterFactory
@@ -67,9 +67,9 @@ public class CapitalizationFilterFactory : TokenFilterFactory
public const string FORCE_FIRST_LETTER = "forceFirstLetter";
public const string CULTURE = "culture"; // LUCENENET specific
- internal CharArraySet keep;
+ internal readonly CharArraySet keep; // LUCENENET: marked readonly
- internal ICollection okPrefix = Collections.EmptyList(); // for Example: McK
+ internal readonly ICollection okPrefix = Collections.EmptyList(); // for Example: McK // LUCENENET: marked readonly
internal readonly int minWordLength; // don't modify capitalization for words shorter then this
internal readonly int maxWordCount;
@@ -119,4 +119,4 @@ public override TokenStream Create(TokenStream input)
return new CapitalizationFilter(input, onlyFirstWord, keep, forceFirstLetter, okPrefix, minWordLength, maxWordCount, maxTokenLength, culture);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
index 439fc0f04a..d02e122973 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
@@ -55,7 +55,7 @@ public class WordDelimiterFilterFactory : TokenFilterFactory, IResourceLoaderAwa
///
/// Creates a new
- public WordDelimiterFilterFactory(IDictionary args)
+ public WordDelimiterFilterFactory(IDictionary args)
: base(args)
{
AssureMatchVersion();
@@ -213,7 +213,7 @@ private static byte ParseType(string s) // LUCENENET: CA1822: Mark members as st
}
}
- internal char[] @out = new char[256];
+ internal readonly char[] @out = new char[256]; // LUCENENET: marked readonly
private string ParseString(string s)
{
@@ -266,4 +266,4 @@ private string ParseString(string s)
return new string(@out, 0, writePos);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
index 35172b8ba9..705bc7d88c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
@@ -112,7 +112,7 @@ private static CharArrayDictionary LoadDefaultStemDict() // LUCENENET: A
///
/// Contains words that should be indexed but not stemmed.
///
- private CharArraySet excltable = CharArraySet.Empty;
+ private readonly CharArraySet excltable = CharArraySet.Empty; // LUCENENET: marked readonly
private readonly StemmerOverrideFilter.StemmerOverrideMap stemdict;
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs
index d12b682b2c..22fa7210b5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Pattern
*/
///
- /// Factory for .
+ /// Factory for .
///
/// <fieldType name="text_ptncapturegroup" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
@@ -35,10 +35,10 @@ namespace Lucene.Net.Analysis.Pattern
///
public class PatternCaptureGroupFilterFactory : TokenFilterFactory
{
- private Regex pattern;
- private bool preserveOriginal = true;
+ private readonly Regex pattern; // LUCENENET: marked readonly
+ private readonly bool preserveOriginal /*= true*/; // LUCENENET: marked readonly, removed overwritten initializer
- public PatternCaptureGroupFilterFactory(IDictionary args)
+ public PatternCaptureGroupFilterFactory(IDictionary args)
: base(args)
{
pattern = GetPattern(args, "pattern");
@@ -50,4 +50,4 @@ public override TokenStream Create(TokenStream input)
return new PatternCaptureGroupTokenFilter(input, preserveOriginal, pattern);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs
index 838aaef4b3..5ac17cd14a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs
@@ -27,20 +27,20 @@ namespace Lucene.Net.Analysis.Pattern
///
/// CaptureGroup uses .NET regexes to emit multiple tokens - one for each capture
/// group in one or more patterns.
- ///
+ ///
///
/// For example, a pattern like:
///
- ///
+ ///
///
/// "(https?://([a-zA-Z\-_0-9.]+))"
///
- ///
+ ///
///
/// when matched against the string "http://www.foo.com/index" would return the
/// tokens "https://www.foo.com" and "www.foo.com".
///
- ///
+ ///
///
/// If none of the patterns match, or if preserveOriginal is true, the original
/// token will be preserved.
@@ -55,9 +55,9 @@ namespace Lucene.Net.Analysis.Pattern
///
///
///
- /// "([A-Z]{2,})",
- /// "(?<![A-Z])([A-Z][a-z]+)",
- /// "(?:^|\\b|(?<=[0-9_])|(?<=[A-Z]{2}))([a-z]+)",
+ /// "([A-Z]{2,})",
+ /// "(?<![A-Z])([A-Z][a-z]+)",
+ /// "(?:^|\\b|(?<=[0-9_])|(?<=[A-Z]{2}))([a-z]+)",
/// "([0-9]+)"
///
///
@@ -76,7 +76,7 @@ public sealed class PatternCaptureGroupTokenFilter : TokenFilter
private readonly CharsRef spare = new CharsRef();
private readonly int[] groupCounts;
private readonly bool preserveOriginal;
- private int[] currentGroup;
+ private readonly int[] currentGroup; // LUCENENET: marked readonly
private int currentMatcher;
///
@@ -89,7 +89,7 @@ public sealed class PatternCaptureGroupTokenFilter : TokenFilter
/// patterns matches
///
/// an array of objects to match against each token
- public PatternCaptureGroupTokenFilter(TokenStream input, bool preserveOriginal, params Regex[] patterns)
+ public PatternCaptureGroupTokenFilter(TokenStream input, bool preserveOriginal, params Regex[] patterns)
: base(input)
{
this.preserveOriginal = preserveOriginal;
@@ -118,7 +118,7 @@ private bool NextCapture()
if (currentGroup[i] == -1)
{
if (matchers[i] is null)
- matchers[i] = patterns[i].Match(new string(spare.Chars, spare.Offset, spare.Length));
+ matchers[i] = patterns[i].Match(new string(spare.Chars, spare.Offset, spare.Length));
else
matchers[i] = matchers[i].NextMatch();
currentGroup[i] = matchers[i].Success ? 1 : 0;
@@ -160,7 +160,7 @@ public override bool IncrementToken()
ClearAttributes();
RestoreState(state);
int start = matchers[currentMatcher].Groups[currentGroup[currentMatcher]].Index;
- int end = matchers[currentMatcher].Groups[currentGroup[currentMatcher]].Index +
+ int end = matchers[currentMatcher].Groups[currentGroup[currentMatcher]].Index +
matchers[currentMatcher].Groups[currentGroup[currentMatcher]].Length;
posAttr.PositionIncrement = 0;
@@ -192,7 +192,7 @@ public override bool IncrementToken()
else if (NextCapture())
{
int start = matchers[currentMatcher].Groups[currentGroup[currentMatcher]].Index;
- int end = matchers[currentMatcher].Groups[currentGroup[currentMatcher]].Index +
+ int end = matchers[currentMatcher].Groups[currentGroup[currentMatcher]].Index +
matchers[currentMatcher].Groups[currentGroup[currentMatcher]].Length;
// if we start at 0 we can simply set the length and save the copy
@@ -217,4 +217,4 @@ public override void Reset()
currentMatcher = -1;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs
index ceccf68f53..71b4be9bfc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs
@@ -27,13 +27,13 @@ namespace Lucene.Net.Analysis.Payloads
///
public class NumericPayloadTokenFilter : TokenFilter
{
- private string typeMatch;
- private BytesRef thePayload;
+ private readonly string typeMatch; // LUCENENET: marked readonly
+ private readonly BytesRef thePayload; // LUCENENET: marked readonly
private readonly IPayloadAttribute payloadAtt;
private readonly ITypeAttribute typeAtt;
- public NumericPayloadTokenFilter(TokenStream input, float payload, string typeMatch)
+ public NumericPayloadTokenFilter(TokenStream input, float payload, string typeMatch)
: base(input)
{
if (typeMatch is null)
@@ -63,4 +63,4 @@ public override sealed bool IncrementToken()
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
index 7a9940850a..d5fe31f062 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
@@ -28,26 +28,26 @@ namespace Lucene.Net.Analysis.Snowball
*/
///
- /// Filters with ,
+ /// Filters with ,
/// , and .
- ///
+ ///
/// Available stemmers are listed in org.tartarus.snowball.ext. The name of a
/// stemmer is the part of the class name before "Stemmer", e.g., the stemmer in
/// is named "English".
- ///
+ ///
/// NOTE : This class uses the same
/// dependent settings as , with the following addition:
///
/// As of 3.1, uses for Turkish language.
///
///
- /// @deprecated (3.1) Use the language-specific analyzer in modules/analysis instead.
- /// This analyzer will be removed in Lucene 5.0
+ /// @deprecated (3.1) Use the language-specific analyzer in modules/analysis instead.
+ /// This analyzer will be removed in Lucene 5.0
[Obsolete("(3.1) Use the language-specific analyzer in modules/analysis instead. This analyzer will be removed in Lucene 5.0.")]
public sealed class SnowballAnalyzer : Analyzer
{
- private string name;
- private CharArraySet stopSet;
+ private readonly string name; // LUCENENET: marked readonly
+ private readonly CharArraySet stopSet; // LUCENENET: marked readonly
private readonly LuceneVersion matchVersion;
///
@@ -66,9 +66,9 @@ public SnowballAnalyzer(LuceneVersion matchVersion, string name, CharArraySet st
}
///
- /// Constructs a filtered by a
+ /// Constructs a filtered by a
/// , a , a ,
- /// and a
+ /// and a
///
protected internal override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
@@ -96,4 +96,4 @@ protected internal override TokenStreamComponents CreateComponents(string fieldN
return new TokenStreamComponents(tokenizer, result);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
index c319c7d79c..cc0a0de10b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
@@ -91,7 +91,7 @@ internal class MapEntry
{
internal bool includeOrig;
// we could sort for better sharing ultimately, but it could confuse people
- internal JCG.List ords = new JCG.List();
+ internal readonly JCG.List ords = new JCG.List(); // LUCENENET: marked readonly
}
///
diff --git a/src/Lucene.Net.Analysis.Phonetic/Language/Bm/PhoneticEngine.cs b/src/Lucene.Net.Analysis.Phonetic/Language/Bm/PhoneticEngine.cs
index 79a40c1b74..5cff821f6f 100644
--- a/src/Lucene.Net.Analysis.Phonetic/Language/Bm/PhoneticEngine.cs
+++ b/src/Lucene.Net.Analysis.Phonetic/Language/Bm/PhoneticEngine.cs
@@ -46,7 +46,7 @@ namespace Lucene.Net.Analysis.Phonetic.Language.Bm
///
public class PhoneticEngine
{
- internal Regex WHITESPACE = new Regex("\\s+", RegexOptions.Compiled);
+ internal static readonly Regex WHITESPACE = new Regex("\\s+", RegexOptions.Compiled); // LUCENENET: marked static readonly
///
/// Utility for manipulating a set of phonemes as they are being built up. Not intended for use outside
diff --git a/src/Lucene.Net.Analysis.Phonetic/PhoneticFilterFactory.cs b/src/Lucene.Net.Analysis.Phonetic/PhoneticFilterFactory.cs
index 0110dd7f41..dfa27d88b3 100644
--- a/src/Lucene.Net.Analysis.Phonetic/PhoneticFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Phonetic/PhoneticFilterFactory.cs
@@ -55,7 +55,7 @@ namespace Lucene.Net.Analysis.Phonetic
///
///
///
- ///
+ ///
///
/// <fieldType name="text_phonetic" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
@@ -87,7 +87,7 @@ public class PhoneticFilterFactory : TokenFilterFactory, IResourceLoaderAware
{ "ColognePhonetic".ToUpperInvariant(), typeof(ColognePhonetic) },
};
- internal bool inject; //accessed by the test
+ internal readonly bool inject; //accessed by the test // LUCENENET: marked readonly
private readonly string name;
private readonly int? maxCodeLength;
private Type clazz = null;
diff --git a/src/Lucene.Net.Analysis.Stempel/Egothor.Stemmer/Row.cs b/src/Lucene.Net.Analysis.Stempel/Egothor.Stemmer/Row.cs
index 22b083f558..0d1867c9f3 100644
--- a/src/Lucene.Net.Analysis.Stempel/Egothor.Stemmer/Row.cs
+++ b/src/Lucene.Net.Analysis.Stempel/Egothor.Stemmer/Row.cs
@@ -65,7 +65,7 @@ namespace Egothor.Stemmer
///
public class Row
{
- internal IDictionary cells = new JCG.SortedDictionary();
+ internal readonly IDictionary cells = new JCG.SortedDictionary(); // LUCENENET: marked readonly
internal int uniformCnt = 0;
internal int uniformSkip = 0;
diff --git a/src/Lucene.Net.Analysis.Stempel/Egothor.Stemmer/Trie.cs b/src/Lucene.Net.Analysis.Stempel/Egothor.Stemmer/Trie.cs
index 5ebe445fc7..34262d604e 100644
--- a/src/Lucene.Net.Analysis.Stempel/Egothor.Stemmer/Trie.cs
+++ b/src/Lucene.Net.Analysis.Stempel/Egothor.Stemmer/Trie.cs
@@ -71,9 +71,9 @@ namespace Egothor.Stemmer
///
public class Trie
{
- internal IList rows = new JCG.List();
- internal IList cmds = new JCG.List();
- internal int root;
+ internal readonly IList rows = new JCG.List(); // LUCENENET: marked readonly
+ internal readonly IList cmds = new JCG.List(); // LUCENENET: marked readonly
+ internal readonly int root; // LUCENENET: marked readonly
internal bool forward = false;
diff --git a/src/Lucene.Net.Benchmark/ByTask/Feeds/DirContentSource.cs b/src/Lucene.Net.Benchmark/ByTask/Feeds/DirContentSource.cs
index 06bb70fb4a..9024e59f87 100644
--- a/src/Lucene.Net.Benchmark/ByTask/Feeds/DirContentSource.cs
+++ b/src/Lucene.Net.Benchmark/ByTask/Feeds/DirContentSource.cs
@@ -82,7 +82,7 @@ which reverses again */
internal int count = 0;
- internal Stack stack = new Stack();
+ internal readonly Stack stack = new Stack(); // LUCENENET: marked readonly
/* this seems silly ... there must be a better way ...
not that this is good, but can it matter? */
diff --git a/src/Lucene.Net.Benchmark/ByTask/Feeds/DocMaker.cs b/src/Lucene.Net.Benchmark/ByTask/Feeds/DocMaker.cs
index 2a10caefad..7fe0381f06 100644
--- a/src/Lucene.Net.Benchmark/ByTask/Feeds/DocMaker.cs
+++ b/src/Lucene.Net.Benchmark/ByTask/Feeds/DocMaker.cs
@@ -41,7 +41,7 @@ namespace Lucene.Net.Benchmarks.ByTask.Feeds
/// doc.body.tokenized specifies whether the body field should be tokenized (default = doc.tokenized ).
/// doc.tokenized.norms specifies whether norms should be stored in the index or not. (default false ).
/// doc.body.tokenized.norms
- /// specifies whether norms should be stored in the index for the body field.
+ /// specifies whether norms should be stored in the index for the body field.
/// This can be set to true, while doc.tokenized.norms is set to false, to allow norms storing just
/// for the body field. (default true ).
///
@@ -80,7 +80,7 @@ protected class DocState
private readonly IDictionary numericFields;
private readonly bool reuseFields;
internal readonly Document doc;
- internal DocData docData = new DocData();
+ internal readonly DocData docData = new DocData(); // LUCENENET: marked readonly
public DocState(bool reuseFields, FieldType ft, FieldType bodyFt)
{
@@ -275,7 +275,7 @@ private Document CreateDocument(DocData docData, int size, int cnt)
if (date is null)
{
// just set to right now
- date = DateTime.Now;
+ date = DateTime.Now;
}
Field dateField = ds.GetNumericField(DATE_MSEC_FIELD, NumericType.INT64);
diff --git a/src/Lucene.Net.Benchmark/ByTask/Stats/TaskStats.cs b/src/Lucene.Net.Benchmark/ByTask/Stats/TaskStats.cs
index 5c8014c0eb..2ee81876e5 100644
--- a/src/Lucene.Net.Benchmark/ByTask/Stats/TaskStats.cs
+++ b/src/Lucene.Net.Benchmark/ByTask/Stats/TaskStats.cs
@@ -24,9 +24,9 @@ namespace Lucene.Net.Benchmarks.ByTask.Stats
*/
///
- /// Statistics for a task run.
+ /// Statistics for a task run.
///
- /// The same task can run more than once, but, if that task records statistics,
+ /// The same task can run more than once, but, if that task records statistics,
/// each run would create its own TaskStats.
///
public class TaskStats // LUCENENET specific: Not implementing ICloneable per Microsoft's recommendation
@@ -38,7 +38,7 @@ public class TaskStats // LUCENENET specific: Not implementing ICloneable per Mi
private int round;
/// Task start time.
- private long start;
+ private readonly long start; // LUCENENET: marked readonly
/// Task elapsed time. elapsed >= 0 indicates run completion!
private long elapsed = -1;
@@ -58,13 +58,13 @@ public class TaskStats // LUCENENET specific: Not implementing ICloneable per Mi
///
/// Number of work items done by this task.
/// For indexing that can be number of docs added.
- /// For warming that can be number of scanned items, etc.
+ /// For warming that can be number of scanned items, etc.
/// For repeating tasks, this is a sum over repetitions.
///
private int count;
///
- /// Number of similar tasks aggregated into this record.
+ /// Number of similar tasks aggregated into this record.
/// Used when summing up on few runs/instances of similar tasks.
///
private int numRuns = 1;
@@ -115,7 +115,7 @@ public virtual void SetCountsByTime(int[] counts, long msecStep)
[WritableArray]
public virtual int[] GetCountsByTime()
{
- return countsByTime;
+ return countsByTime;
}
public virtual long CountsByTimeStepMSec => countsByTimeStepMSec;
@@ -168,7 +168,7 @@ public virtual void Add(TaskStats stat2)
count += stat2.Count;
if (round != stat2.round)
{
- round = -1; // no meaning if aggregating tasks of different round.
+ round = -1; // no meaning if aggregating tasks of different round.
}
if (countsByTime != null && stat2.countsByTime != null)
diff --git a/src/Lucene.Net.Benchmark/Quality/Trec/TrecJudge.cs b/src/Lucene.Net.Benchmark/Quality/Trec/TrecJudge.cs
index 4bcffa8a7b..25c9a6f15e 100644
--- a/src/Lucene.Net.Benchmark/Quality/Trec/TrecJudge.cs
+++ b/src/Lucene.Net.Benchmark/Quality/Trec/TrecJudge.cs
@@ -106,7 +106,7 @@ public virtual bool IsRelevant(string docName, QualityQuery query)
///
private class QRelJudgement
{
- internal string queryID;
+ internal readonly string queryID; // LUCENENET: marked readonly
private readonly IDictionary relevantDocs; // LUCENENET: marked readonly
internal QRelJudgement(string queryID)
diff --git a/src/Lucene.Net.Benchmark/Quality/Utils/QualityQueriesFinder.cs b/src/Lucene.Net.Benchmark/Quality/Utils/QualityQueriesFinder.cs
index 01ebc8a5d3..98bd9ec694 100644
--- a/src/Lucene.Net.Benchmark/Quality/Utils/QualityQueriesFinder.cs
+++ b/src/Lucene.Net.Benchmark/Quality/Utils/QualityQueriesFinder.cs
@@ -146,8 +146,8 @@ private string[] BestTerms(string field, int numTerms)
private class TermDf
{
- internal string word;
- internal int df;
+ internal readonly string word; // LUCENENET: marked readonly
+ internal readonly int df; // LUCENENET: marked readonly
internal TermDf(string word, int freq)
{
this.word = word;
diff --git a/src/Lucene.Net.Codecs/Memory/FSTTermsReader.cs b/src/Lucene.Net.Codecs/Memory/FSTTermsReader.cs
index 44745aa482..b907ff2d77 100644
--- a/src/Lucene.Net.Codecs/Memory/FSTTermsReader.cs
+++ b/src/Lucene.Net.Codecs/Memory/FSTTermsReader.cs
@@ -254,7 +254,7 @@ internal abstract class BaseTermsEnum : TermsEnum
/// Current term stats + undecoded metadata (long[] & byte[]).
internal FSTTermOutputs.TermData meta;
- internal ByteArrayDataInput bytesReader;
+ internal readonly ByteArrayDataInput bytesReader; // LUCENENET: marked readonly
///
/// Decodes metadata into customized term state.
@@ -860,4 +860,4 @@ public override void CheckIntegrity()
postingsReader.CheckIntegrity();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Codecs/Memory/MemoryPostingsFormat.cs b/src/Lucene.Net.Codecs/Memory/MemoryPostingsFormat.cs
index a86b8312c9..91da8e843d 100644
--- a/src/Lucene.Net.Codecs/Memory/MemoryPostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Memory/MemoryPostingsFormat.cs
@@ -138,7 +138,7 @@ public PostingsWriter(MemoryPostingsFormat.TermsWriter outerInstance)
// NOTE: not private so we don't pay access check at runtime:
internal int docCount;
- internal RAMOutputStream buffer = new RAMOutputStream();
+ internal readonly RAMOutputStream buffer = new RAMOutputStream(); // LUCENENET: marked readonly
private int lastOffsetLength;
private int lastOffset;
diff --git a/src/Lucene.Net.Expressions/ExpressionRescorer.cs b/src/Lucene.Net.Expressions/ExpressionRescorer.cs
index 5a948e2268..2f15d3f26f 100644
--- a/src/Lucene.Net.Expressions/ExpressionRescorer.cs
+++ b/src/Lucene.Net.Expressions/ExpressionRescorer.cs
@@ -52,7 +52,7 @@ private class FakeScorer : Scorer
{
internal float score;
internal int doc = -1;
- internal int freq = 1;
+ internal const int freq = 1; // LUCENENET: marked const
public FakeScorer()
: base(null)
diff --git a/src/Lucene.Net.Facet/Taxonomy/Directory/DirectoryTaxonomyWriter.cs b/src/Lucene.Net.Facet/Taxonomy/Directory/DirectoryTaxonomyWriter.cs
index fa42f72ec0..44ec1ab318 100644
--- a/src/Lucene.Net.Facet/Taxonomy/Directory/DirectoryTaxonomyWriter.cs
+++ b/src/Lucene.Net.Facet/Taxonomy/Directory/DirectoryTaxonomyWriter.cs
@@ -1127,7 +1127,7 @@ public int[] GetMap()
///
public sealed class DiskOrdinalMap : IOrdinalMap
{
- internal string tmpfile;
+ internal readonly string tmpfile; // LUCENENET: marked readonly
internal OutputStreamDataOutput @out;
///
diff --git a/src/Lucene.Net.Facet/Taxonomy/WriterCache/CharBlockArray.cs b/src/Lucene.Net.Facet/Taxonomy/WriterCache/CharBlockArray.cs
index 6459db7808..07fc91f4ec 100644
--- a/src/Lucene.Net.Facet/Taxonomy/WriterCache/CharBlockArray.cs
+++ b/src/Lucene.Net.Facet/Taxonomy/WriterCache/CharBlockArray.cs
@@ -97,9 +97,9 @@ public Block(Stream reader)
}
}
- internal IList blocks;
+ internal readonly IList blocks; // LUCENENET: marked readonly
internal Block current;
- internal int blockSize;
+ internal readonly int blockSize; // LUCENENET: marked readonly
internal int length;
public CharBlockArray()
diff --git a/src/Lucene.Net.Facet/Taxonomy/WriterCache/CollisionMap.cs b/src/Lucene.Net.Facet/Taxonomy/WriterCache/CollisionMap.cs
index c466b89a7a..1442e1f098 100644
--- a/src/Lucene.Net.Facet/Taxonomy/WriterCache/CollisionMap.cs
+++ b/src/Lucene.Net.Facet/Taxonomy/WriterCache/CollisionMap.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Facet.Taxonomy.WriterCache
///
/// HashMap to store colliding labels. See for
/// details.
- ///
+ ///
/// @lucene.experimental
///
public class CollisionMap
@@ -37,10 +37,10 @@ public class CollisionMap
internal class Entry
{
- internal int offset;
- internal int cid;
+ internal readonly int offset; // LUCENENET: marked readonly
+ internal readonly int cid; // LUCENENET: marked readonly
internal Entry next;
- internal int hash;
+ internal readonly int hash; // LUCENENET: marked readonly
internal Entry(int offset, int cid, int h, Entry e)
{
@@ -81,7 +81,7 @@ private CollisionMap(int initialCapacity, float loadFactor, CharBlockArray label
public virtual int Count => this.size;
///
- /// How many slots are allocated.
+ /// How many slots are allocated.
///
public virtual int Capacity => this.capacity;
@@ -115,8 +115,8 @@ private void Grow()
}
///
- /// Return the mapping, or
- /// if the label isn't recognized.
+ /// Return the mapping, or
+ /// if the label isn't recognized.
///
public virtual int Get(FacetLabel label, int hash)
{
@@ -136,7 +136,7 @@ public virtual int Get(FacetLabel label, int hash)
}
///
- /// Add another mapping.
+ /// Add another mapping.
///
public virtual int AddLabel(FacetLabel label, int hash, int cid)
{
@@ -183,7 +183,7 @@ private void AddEntry(int offset, int cid, int hash, int bucketIndex)
}
///
- /// Returns index for hash code h.
+ /// Returns index for hash code h.
///
internal static int IndexFor(int h, int length)
{
@@ -217,7 +217,7 @@ private sealed class EntryEnumerator : IEnumerator // LUCENENET: Marked s
{
internal Entry next; // next entry to return
internal int index; // current slot
- internal Entry[] ents;
+ internal readonly Entry[] ents; // LUCENENET: marked readonly
internal EntryEnumerator(Entry[] entries, int size)
{
@@ -287,4 +287,4 @@ public void Reset()
object IEnumerator.Current => Current;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Facet/Taxonomy/WriterCache/CompactLabelToOrdinal.cs b/src/Lucene.Net.Facet/Taxonomy/WriterCache/CompactLabelToOrdinal.cs
index fb4272b7cd..02f8c80d6c 100644
--- a/src/Lucene.Net.Facet/Taxonomy/WriterCache/CompactLabelToOrdinal.cs
+++ b/src/Lucene.Net.Facet/Taxonomy/WriterCache/CompactLabelToOrdinal.cs
@@ -484,10 +484,10 @@ internal virtual void Flush(Stream stream)
private sealed class HashArray
{
- internal int[] offsets;
- internal int[] cids;
+ internal readonly int[] offsets; // LUCENENET: marked readonly
+ internal readonly int[] cids; // LUCENENET: marked readonly
- internal int capacity;
+ internal readonly int capacity; // LUCENENET: marked readonly
internal HashArray(int c)
{
diff --git a/src/Lucene.Net.Grouping/Term/TermAllGroupHeadsCollector.cs b/src/Lucene.Net.Grouping/Term/TermAllGroupHeadsCollector.cs
index f0580225d2..e84247af88 100644
--- a/src/Lucene.Net.Grouping/Term/TermAllGroupHeadsCollector.cs
+++ b/src/Lucene.Net.Grouping/Term/TermAllGroupHeadsCollector.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Search.Grouping.Terms
/// A base implementation of for retrieving the most relevant groups when grouping
/// on a string based group field. More specifically this all concrete implementations of this base implementation
/// use .
- ///
+ ///
/// @lucene.experimental
///
///
@@ -382,9 +382,9 @@ internal class GroupHead : AbstractAllGroupHeadsCollector_GroupHead /*AbstractAl
public BytesRef GroupValue => groupValue;
private readonly BytesRef groupValue;
- internal BytesRef[] sortValues;
- internal int[] sortOrds;
- internal float[] scores;
+ internal readonly BytesRef[] sortValues; // LUCENENET: marked readonly
+ internal readonly int[] sortOrds; // LUCENENET: marked readonly
+ internal readonly float[] scores; // LUCENENET: marked readonly
internal GroupHead(OrdScoreAllGroupHeadsCollector outerInstance, int doc, BytesRef groupValue)
: base(doc + outerInstance.readerContext.DocBase)
@@ -596,8 +596,8 @@ internal class GroupHead : AbstractAllGroupHeadsCollector_GroupHead /* AbstractA
// need to reference the generic closing type BytesRef.
public BytesRef GroupValue => groupValue;
private readonly BytesRef groupValue;
- internal BytesRef[] sortValues;
- internal int[] sortOrds;
+ internal readonly BytesRef[] sortValues; // LUCENENET: marked readonly
+ internal readonly int[] sortOrds; // LUCENENET: marked readonly
internal GroupHead(OrdAllGroupHeadsCollector outerInstance, int doc, BytesRef groupValue)
: base(doc + outerInstance.readerContext.DocBase)
@@ -758,7 +758,7 @@ internal class GroupHead : AbstractAllGroupHeadsCollector_GroupHead
// need to reference the generic closing type BytesRef.
public BytesRef GroupValue => groupValue;
private readonly BytesRef groupValue;
- internal float[] scores;
+ internal readonly float[] scores; // LUCENENET: marked readonly
internal GroupHead(ScoreAllGroupHeadsCollector outerInstance, int doc, BytesRef groupValue)
: base(doc + outerInstance.readerContext.DocBase)
diff --git a/src/Lucene.Net.Highlighter/Highlight/SimpleHTMLFormatter.cs b/src/Lucene.Net.Highlighter/Highlight/SimpleHTMLFormatter.cs
index a3bba74f05..ae99fc520d 100644
--- a/src/Lucene.Net.Highlighter/Highlight/SimpleHTMLFormatter.cs
+++ b/src/Lucene.Net.Highlighter/Highlight/SimpleHTMLFormatter.cs
@@ -26,8 +26,8 @@ public class SimpleHTMLFormatter : IFormatter
private const string DEFAULT_PRE_TAG = "";
private const string DEFAULT_POST_TAG = " ";
- internal string preTag;
- internal string postTag;
+ internal readonly string preTag; // LUCENENET: marked readonly
+ internal readonly string postTag; // LUCENENET: marked readonly
public SimpleHTMLFormatter(string preTag, string postTag)
{
@@ -35,7 +35,7 @@ public SimpleHTMLFormatter(string preTag, string postTag)
this.postTag = postTag;
}
- ///
+ ///
/// Default constructor uses HTML: <B> tags to markup terms
///
public SimpleHTMLFormatter() : this(DEFAULT_PRE_TAG, DEFAULT_POST_TAG) { }
@@ -52,7 +52,7 @@ public virtual string HighlightTerm(string originalText, TokenGroup tokenGroup)
// Allocate StringBuilder with the right number of characters from the
// beginning, to avoid char[] allocations in the middle of appends.
- StringBuilder returnBuffer = new StringBuilder(preTag.Length + originalText.Length + postTag.Length);
+ StringBuilder returnBuffer = new StringBuilder(preTag.Length + originalText.Length + postTag.Length);
returnBuffer.Append(preTag);
returnBuffer.Append(originalText);
returnBuffer.Append(postTag);
diff --git a/src/Lucene.Net.Highlighter/Highlight/TokenGroup.cs b/src/Lucene.Net.Highlighter/Highlight/TokenGroup.cs
index aa3aa82d92..0d7b21cda9 100644
--- a/src/Lucene.Net.Highlighter/Highlight/TokenGroup.cs
+++ b/src/Lucene.Net.Highlighter/Highlight/TokenGroup.cs
@@ -30,8 +30,8 @@ public class TokenGroup
{
private const int MAX_NUM_TOKENS_PER_GROUP = 50;
- internal Token[] tokens = new Token[MAX_NUM_TOKENS_PER_GROUP];
- internal float[] scores = new float[MAX_NUM_TOKENS_PER_GROUP];
+ internal readonly Token[] tokens = new Token[MAX_NUM_TOKENS_PER_GROUP]; // LUCENENET: marked readonly
+ internal readonly float[] scores = new float[MAX_NUM_TOKENS_PER_GROUP]; // LUCENENET: marked readonly
internal int MatchStartOffset { get; set; }
internal int MatchEndOffset { get; set; }
diff --git a/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs b/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
index 24c2154690..fcb16ef1f8 100644
--- a/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
+++ b/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
@@ -57,12 +57,12 @@ public int Compare(Token t1, Token t2)
internal sealed class StoredTokenStream : TokenStream
{
- internal Token[] tokens;
+ internal readonly Token[] tokens; // LUCENENET: marked readonly
internal int currentToken = 0;
- internal ICharTermAttribute termAtt;
- internal IOffsetAttribute offsetAtt;
- internal IPositionIncrementAttribute posincAtt;
- internal IPayloadAttribute payloadAtt;
+ internal readonly ICharTermAttribute termAtt; // LUCENENET: marked readonly
+ internal readonly IOffsetAttribute offsetAtt; // LUCENENET: marked readonly
+ internal readonly IPositionIncrementAttribute posincAtt; // LUCENENET: marked readonly
+ internal readonly IPayloadAttribute payloadAtt; // LUCENENET: marked readonly
internal StoredTokenStream(Token[] tokens)
{
diff --git a/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs b/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
index da517f697c..e796eff56c 100644
--- a/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
+++ b/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
@@ -804,9 +804,9 @@ protected virtual Passage[] GetEmptyHighlight(string fieldName, BreakIterator bi
internal class OffsetsEnum : IComparable
{
- internal DocsAndPositionsEnum dp;
+ internal readonly DocsAndPositionsEnum dp; // LUCENENET: marked readonly
internal int pos;
- internal int id;
+ internal readonly int id; // LUCENENET: marked readonly
internal OffsetsEnum(DocsAndPositionsEnum dp, int id)
{
diff --git a/src/Lucene.Net.Highlighter/VectorHighlight/FieldPhraseList.cs b/src/Lucene.Net.Highlighter/VectorHighlight/FieldPhraseList.cs
index 122ecd8dfe..c2d47016ca 100644
--- a/src/Lucene.Net.Highlighter/VectorHighlight/FieldPhraseList.cs
+++ b/src/Lucene.Net.Highlighter/VectorHighlight/FieldPhraseList.cs
@@ -35,7 +35,7 @@ public class FieldPhraseList
///
/// List of non-overlapping objects.
///
- internal IList phraseList = new JCG.List();
+ internal readonly IList phraseList = new JCG.List(); // LUCENENET: marked readonly
///
/// create a that has no limit on the number of phrases to analyze
diff --git a/src/Lucene.Net.Highlighter/VectorHighlight/FieldQuery.cs b/src/Lucene.Net.Highlighter/VectorHighlight/FieldQuery.cs
index 624b3f11b7..b585197263 100644
--- a/src/Lucene.Net.Highlighter/VectorHighlight/FieldQuery.cs
+++ b/src/Lucene.Net.Highlighter/VectorHighlight/FieldQuery.cs
@@ -34,11 +34,11 @@ public class FieldQuery
// fieldMatch==true, Map
// fieldMatch==false, Map
- internal IDictionary rootMaps = new JCG.Dictionary();
+ internal readonly IDictionary rootMaps = new JCG.Dictionary(); // LUCENENET: marked readonly
// fieldMatch==true, Map
// fieldMatch==false, Map
- internal IDictionary> termSetMap = new JCG.Dictionary>();
+ internal readonly IDictionary> termSetMap = new JCG.Dictionary>(); // LUCENENET: marked readonly
internal int termOrPhraseNumber; // used for colored tag support
@@ -428,8 +428,8 @@ public class QueryPhraseMap
internal int slop; // valid if terminal == true and phraseHighlight == true
internal float boost; // valid if terminal == true
internal int termOrPhraseNumber; // valid if terminal == true
- internal FieldQuery fieldQuery;
- internal IDictionary subMap = new Dictionary();
+ internal readonly FieldQuery fieldQuery; // LUCENENET: marked readonly
+ internal readonly IDictionary subMap = new Dictionary(); // LUCENENET: marked readonly
public QueryPhraseMap(FieldQuery fieldQuery)
{
diff --git a/src/Lucene.Net.Highlighter/VectorHighlight/FieldTermStack.cs b/src/Lucene.Net.Highlighter/VectorHighlight/FieldTermStack.cs
index a8a0e3b3b3..fa14efec10 100644
--- a/src/Lucene.Net.Highlighter/VectorHighlight/FieldTermStack.cs
+++ b/src/Lucene.Net.Highlighter/VectorHighlight/FieldTermStack.cs
@@ -33,7 +33,7 @@ namespace Lucene.Net.Search.VectorHighlight
public class FieldTermStack
{
private readonly string fieldName;
- internal IList termList = new JCG.List();
+ internal readonly IList termList = new JCG.List(); // LUCENENET: marked readonly
//public static void main( string[] args ) throws Exception {
// Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
diff --git a/src/Lucene.Net.Memory/MemoryIndex.Info.cs b/src/Lucene.Net.Memory/MemoryIndex.Info.cs
index 354124b683..feb4ecb812 100644
--- a/src/Lucene.Net.Memory/MemoryIndex.Info.cs
+++ b/src/Lucene.Net.Memory/MemoryIndex.Info.cs
@@ -36,36 +36,36 @@ private sealed class Info
internal readonly SliceByteStartArray sliceArray;
///
- /// Terms sorted ascending by term text; computed on demand
+ /// Terms sorted ascending by term text; computed on demand
///
internal int[] sortedTerms;
///
- /// Number of added tokens for this field
+ /// Number of added tokens for this field
///
internal readonly int numTokens;
///
- /// Number of overlapping tokens for this field
+ /// Number of overlapping tokens for this field
///
internal readonly int numOverlapTokens;
///
- /// Boost factor for hits for this field
+ /// Boost factor for hits for this field
///
internal readonly float boost;
internal readonly long sumTotalTermFreq;
///
- /// the last position encountered in this field for multi field support
+ /// the last position encountered in this field for multi field support
///
- internal int lastPosition;
+ internal readonly int lastPosition; // LUCENENET: marked readonly
///
- /// the last offset encountered in this field for multi field support
+ /// the last offset encountered in this field for multi field support
///
- internal int lastOffset;
+ internal readonly int lastOffset; // LUCENENET: marked readonly
public Info(BytesRefHash terms, SliceByteStartArray sliceArray, int numTokens, int numOverlapTokens, float boost, int lastPosition, int lastOffset, long sumTotalTermFreq)
{
diff --git a/src/Lucene.Net.Memory/MemoryIndex.MemoryIndexReader.cs b/src/Lucene.Net.Memory/MemoryIndex.MemoryIndexReader.cs
index d1f350a4f8..18bd04336a 100644
--- a/src/Lucene.Net.Memory/MemoryIndex.MemoryIndexReader.cs
+++ b/src/Lucene.Net.Memory/MemoryIndex.MemoryIndexReader.cs
@@ -409,7 +409,7 @@ private class MemoryDocsAndPositionsEnum : DocsAndPositionsEnum
internal bool hasNext;
internal IBits liveDocs;
internal int doc = -1;
- internal Int32BlockPool.SliceReader sliceReader;
+ internal readonly Int32BlockPool.SliceReader sliceReader; // LUCENENET: marked readonly
internal int freq;
internal int startOffset;
internal int endOffset;
diff --git a/src/Lucene.Net.Misc/Document/LazyDocument.cs b/src/Lucene.Net.Misc/Document/LazyDocument.cs
index 5537216c84..b60e771272 100644
--- a/src/Lucene.Net.Misc/Document/LazyDocument.cs
+++ b/src/Lucene.Net.Misc/Document/LazyDocument.cs
@@ -150,8 +150,8 @@ public class LazyField : IIndexableField, IFormattable
{
private readonly LazyDocument outerInstance;
- internal string name;
- internal int fieldNum;
+ internal readonly string name; // LUCENENET: marked readonly
+ internal readonly int fieldNum; // LUCENENET: marked readonly
internal volatile IIndexableField realValue = null;
internal LazyField(LazyDocument outerInstance, string name, int fieldNum)
diff --git a/src/Lucene.Net.Misc/Index/IndexSplitter.cs b/src/Lucene.Net.Misc/Index/IndexSplitter.cs
index 5a842805d5..f3a06c8a39 100644
--- a/src/Lucene.Net.Misc/Index/IndexSplitter.cs
+++ b/src/Lucene.Net.Misc/Index/IndexSplitter.cs
@@ -55,9 +55,9 @@ public class IndexSplitter
{
public SegmentInfos Infos { get; set; }
- internal FSDirectory fsDir;
+ internal readonly FSDirectory fsDir; // LUCENENET: marked readonly
- internal DirectoryInfo dir;
+ internal readonly DirectoryInfo dir; // LUCENENET: marked readonly
///
/// LUCENENET specific: In the Java implementation, this Main method
diff --git a/src/Lucene.Net.QueryParser/Classic/FastCharStream.cs b/src/Lucene.Net.QueryParser/Classic/FastCharStream.cs
index 7c02de3fd3..4ae016b04c 100644
--- a/src/Lucene.Net.QueryParser/Classic/FastCharStream.cs
+++ b/src/Lucene.Net.QueryParser/Classic/FastCharStream.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.QueryParsers.Classic
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
+
///
- /// An efficient implementation of JavaCC's interface.
+ /// An efficient implementation of JavaCC's interface.
///
/// Note that
/// this does not do line-number counting, but instead keeps track of the
@@ -33,34 +33,34 @@ namespace Lucene.Net.QueryParsers.Classic
public sealed class FastCharStream : ICharStream
{
internal char[] buffer = null;
-
+
internal int bufferLength = 0; // end of valid chars
internal int bufferPosition = 0; // next char to read
-
+
internal int tokenStart = 0; // offset in buffer
internal int bufferStart = 0; // position in file of buffer
-
- internal TextReader input; // source of chars
+
+ internal readonly TextReader input; // source of chars // LUCENENET: marked readonly
///
- /// Constructs from a .
+ /// Constructs from a .
///
public FastCharStream(TextReader r)
{
input = r;
}
-
+
public char ReadChar()
{
if (bufferPosition >= bufferLength)
Refill();
return buffer[bufferPosition++];
}
-
+
private void Refill()
{
int newPosition = bufferLength - tokenStart;
-
+
if (tokenStart == 0)
{
// token won't fit in buffer
@@ -82,25 +82,25 @@ private void Refill()
// shift token to front
Arrays.Copy(buffer, tokenStart, buffer, 0, newPosition);
}
-
+
bufferLength = newPosition; // update state
bufferPosition = newPosition;
bufferStart += tokenStart;
tokenStart = 0;
-
+
int charsRead = input.Read(buffer, newPosition, buffer.Length - newPosition);
if (charsRead <= 0)
throw new IOException("read past eof");
else
bufferLength += charsRead;
}
-
+
public char BeginToken()
{
tokenStart = bufferPosition;
return ReadChar();
}
-
+
public void BackUp(int amount)
{
bufferPosition -= amount;
@@ -114,7 +114,7 @@ public char[] GetSuffix(int len)
Arrays.Copy(buffer, bufferPosition - len, value, 0, len);
return value;
}
-
+
public void Done()
{
try
@@ -139,4 +139,4 @@ public void Done()
public int BeginLine => 1;
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.QueryParser/Classic/QueryParserTokenManager.cs b/src/Lucene.Net.QueryParser/Classic/QueryParserTokenManager.cs
index 79d0032bd0..19a89ae908 100644
--- a/src/Lucene.Net.QueryParser/Classic/QueryParserTokenManager.cs
+++ b/src/Lucene.Net.QueryParser/Classic/QueryParserTokenManager.cs
@@ -1231,7 +1231,7 @@ protected internal virtual Token JjFillToken()
}
internal int curLexState = 2;
- internal int defaultLexState = 2;
+ internal const int defaultLexState = 2; // LUCENENET: marked const
internal int jjnewStateCnt;
internal uint jjround;
internal int jjmatchedPos;
diff --git a/src/Lucene.Net.QueryParser/Flexible/Standard/Parser/StandardSyntaxParserTokenManager.cs b/src/Lucene.Net.QueryParser/Flexible/Standard/Parser/StandardSyntaxParserTokenManager.cs
index 420f7674e0..11d1819f74 100644
--- a/src/Lucene.Net.QueryParser/Flexible/Standard/Parser/StandardSyntaxParserTokenManager.cs
+++ b/src/Lucene.Net.QueryParser/Flexible/Standard/Parser/StandardSyntaxParserTokenManager.cs
@@ -857,7 +857,7 @@ protected Token JjFillToken()
}
internal int curLexState = 2;
- internal int defaultLexState = 2;
+ internal const int defaultLexState = 2; // LUCENENET: marked const
internal int jjnewStateCnt;
internal uint jjround;
internal int jjmatchedPos;
diff --git a/src/Lucene.Net.QueryParser/Surround/Parser/FastCharStream.cs b/src/Lucene.Net.QueryParser/Surround/Parser/FastCharStream.cs
index fbf9b8424a..77ef1f4e1b 100644
--- a/src/Lucene.Net.QueryParser/Surround/Parser/FastCharStream.cs
+++ b/src/Lucene.Net.QueryParser/Surround/Parser/FastCharStream.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.QueryParsers.Surround.Parser
*/
///
- /// An efficient implementation of JavaCC's interface.
+ /// An efficient implementation of JavaCC's interface.
///
/// Note that
/// this does not do line-number counting, but instead keeps track of the
@@ -40,10 +40,10 @@ public sealed class FastCharStream : ICharStream
internal int tokenStart = 0; // offset in buffer
internal int bufferStart = 0; // position in file of buffer
- internal System.IO.TextReader input; // source of chars
+ internal readonly System.IO.TextReader input; // source of chars // LUCENENET: marked readonly
///
- /// Constructs from a .
+ /// Constructs from a .
///
public FastCharStream(System.IO.TextReader r)
{
@@ -139,4 +139,4 @@ public void Done()
public int BeginLine => 1;
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.QueryParser/Surround/Parser/QueryParserTokenManager.cs b/src/Lucene.Net.QueryParser/Surround/Parser/QueryParserTokenManager.cs
index c4999c4bc8..67029431a8 100644
--- a/src/Lucene.Net.QueryParser/Surround/Parser/QueryParserTokenManager.cs
+++ b/src/Lucene.Net.QueryParser/Surround/Parser/QueryParserTokenManager.cs
@@ -643,7 +643,7 @@ protected Token JjFillToken()
}
internal int curLexState = 1;
- internal int defaultLexState = 1;
+ internal const int defaultLexState = 1; // LUCENENET: marked const
internal int jjnewStateCnt;
internal uint jjround;
internal int jjmatchedPos;
diff --git a/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs b/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs
index 9fd5d3ceac..5a334028f3 100644
--- a/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs
+++ b/src/Lucene.Net.Sandbox/Queries/FuzzyLikeThisQuery.cs
@@ -122,10 +122,10 @@ public FuzzyLikeThisQuery(int maxNumTerms, Analyzer analyzer)
internal class FieldVals
{
- internal string queryString;
- internal string fieldName;
- internal float minSimilarity;
- internal int prefixLength;
+ internal readonly string queryString; // LUCENENET: marked readonly
+ internal readonly string fieldName; // LUCENENET: marked readonly
+ internal readonly float minSimilarity; // LUCENENET: marked readonly
+ internal readonly int prefixLength; // LUCENENET: marked readonly
public FieldVals(string name, float similarity, int length, string queryString)
{
fieldName = name;
diff --git a/src/Lucene.Net.Spatial/Prefix/ContainsPrefixTreeFilter.cs b/src/Lucene.Net.Spatial/Prefix/ContainsPrefixTreeFilter.cs
index 34b8fd4445..e9c67661be 100644
--- a/src/Lucene.Net.Spatial/Prefix/ContainsPrefixTreeFilter.cs
+++ b/src/Lucene.Net.Spatial/Prefix/ContainsPrefixTreeFilter.cs
@@ -80,7 +80,7 @@ public ContainsVisitor(ContainsPrefixTreeFilter outerInstance, AtomicReaderConte
{
}
- internal BytesRef termBytes = new BytesRef();
+ internal readonly BytesRef termBytes = new BytesRef(); // LUCENENET: marked readonly
internal Cell? nextCell;//see getLeafDocs
/// This is the primary algorithm; recursive. Returns null if finds none.
diff --git a/src/Lucene.Net.Suggest/Spell/HighFrequencyDictionary.cs b/src/Lucene.Net.Suggest/Spell/HighFrequencyDictionary.cs
index ad056d3391..88d9e05444 100644
--- a/src/Lucene.Net.Suggest/Spell/HighFrequencyDictionary.cs
+++ b/src/Lucene.Net.Suggest/Spell/HighFrequencyDictionary.cs
@@ -27,10 +27,10 @@ namespace Lucene.Net.Search.Spell
/// HighFrequencyDictionary: terms taken from the given field
/// of a Lucene index, which appear in a number of documents
/// above a given threshold.
- ///
+ ///
/// Threshold is a value in [0..1] representing the minimum
/// number of documents (of the total) where a term should appear.
- ///
+ ///
/// Based on .
///
public class HighFrequencyDictionary : IDictionary
@@ -63,7 +63,7 @@ internal sealed class HighFrequencyEnumerator : IInputEnumerator
{
internal readonly BytesRef spare = new BytesRef();
internal readonly TermsEnum termsEnum;
- internal int minNumDocs;
+ internal readonly int minNumDocs; // LUCENENET: marked readonly
internal long freq;
private BytesRef current;
@@ -133,4 +133,4 @@ public IComparer Comparer
public bool HasContexts => false;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Suggest/Spell/TermFreqIterator.cs b/src/Lucene.Net.Suggest/Spell/TermFreqIterator.cs
index f2e586ab7d..93c83b39fd 100644
--- a/src/Lucene.Net.Suggest/Spell/TermFreqIterator.cs
+++ b/src/Lucene.Net.Suggest/Spell/TermFreqIterator.cs
@@ -39,10 +39,10 @@ public interface ITermFreqEnumerator : IBytesRefEnumerator
///
public class TermFreqEnumeratorWrapper : ITermFreqEnumerator
{
- internal IBytesRefEnumerator wrapped;
+ internal readonly IBytesRefEnumerator wrapped; // LUCENENET: marked readonly
///
- /// Creates a new wrapper, wrapping the specified iterator and
+ /// Creates a new wrapper, wrapping the specified iterator and
/// specifying a weight value of 1
for all terms.
///
public TermFreqEnumeratorWrapper(IBytesRefEnumerator wrapped)
@@ -58,4 +58,4 @@ public TermFreqEnumeratorWrapper(IBytesRefEnumerator wrapped)
public virtual IComparer Comparer => wrapped.Comparer;
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterIndexWriterConfigFactory.cs b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterIndexWriterConfigFactory.cs
index f541e79ea3..1d17dd4b86 100644
--- a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterIndexWriterConfigFactory.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggesterIndexWriterConfigFactory.cs
@@ -34,14 +34,14 @@ public interface IAnalyzingInfixSuggesterIndexWriterConfigFactory
{
IndexWriterConfig Get(LuceneVersion matchVersion, Analyzer indexAnalyzer, OpenMode openMode);
}
-
+
///
/// Default factory for .
///
///
public class AnalyzingInfixSuggesterIndexWriterConfigFactory : IAnalyzingInfixSuggesterIndexWriterConfigFactory
{
- private Sort sort;
+ private readonly Sort sort; // LUCENENET: marked readonly
///
/// Creates a new config factory that uses the given in the sorting merge policy
@@ -50,10 +50,10 @@ public AnalyzingInfixSuggesterIndexWriterConfigFactory(Sort sort)
{
this.sort = sort;
}
-
+
///
/// Override this to customize index settings, e.g. which
- /// codec to use.
+ /// codec to use.
///
public virtual IndexWriterConfig Get(LuceneVersion matchVersion, Analyzer indexAnalyzer, OpenMode openMode)
{
@@ -71,4 +71,4 @@ public virtual IndexWriterConfig Get(LuceneVersion matchVersion, Analyzer indexA
return iwc;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs b/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs
index 4b453c6614..72073b1d42 100644
--- a/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs
+++ b/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Search.Suggest
///
/// Dictionary represented by a text file.
- ///
+ ///
/// Format allowed: 1 entry per line:
/// An entry can be:
///
@@ -38,12 +38,12 @@ namespace Lucene.Net.Search.Suggest
///
/// where the default is (a tab)
///
- /// NOTE:
+ /// NOTE:
///
/// In order to have payload enabled, the first entry has to have a payload
/// If the weight for an entry is not specified then a value of 1 is used
/// A payload cannot be specified without having the weight specified for an entry
- /// If the payload for an entry is not specified (assuming payload is enabled)
+ /// If the payload for an entry is not specified (assuming payload is enabled)
/// then an empty payload is returned
/// An entry cannot have more than two s
///
@@ -67,7 +67,7 @@ public class FileDictionary : IDictionary
///
/// Creates a dictionary based on an inputstream.
- /// Using as the
+ /// Using as the
/// field seperator in a line.
///
/// NOTE: content is treated as UTF-8
@@ -80,7 +80,7 @@ public FileDictionary(Stream dictFile)
///
/// Creates a dictionary based on a reader.
- /// Using as the
+ /// Using as the
/// field seperator in a line.
///
public FileDictionary(TextReader reader)
@@ -89,7 +89,7 @@ public FileDictionary(TextReader reader)
}
///
- /// Creates a dictionary based on a reader.
+ /// Creates a dictionary based on a reader.
/// Using to seperate out the
/// fields in a line.
///
@@ -133,7 +133,7 @@ internal sealed class FileEnumerator : IInputEnumerator
internal readonly BytesRef spare = new BytesRef();
internal BytesRef curPayload = new BytesRef();
internal bool isFirstLine = true;
- internal bool hasPayloads = false;
+ internal readonly bool hasPayloads /*= false*/; // LUCENENET: marked readonly
private BytesRef current;
internal FileEnumerator(FileDictionary outerInstance)
@@ -259,4 +259,4 @@ internal void ReadWeight(string weight)
public bool HasContexts => false;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Suggest/Suggest/Tst/TSTLookup.cs b/src/Lucene.Net.Suggest/Suggest/Tst/TSTLookup.cs
index b36a0fa019..d9c2e5207b 100644
--- a/src/Lucene.Net.Suggest/Suggest/Tst/TSTLookup.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Tst/TSTLookup.cs
@@ -25,14 +25,14 @@ namespace Lucene.Net.Search.Suggest.Tst
*/
///
- /// Suggest implementation based on a
+ /// Suggest implementation based on a
/// Ternary Search Tree
///
///
public class TSTLookup : Lookup
{
internal TernaryTreeNode root = new TernaryTreeNode();
- internal TSTAutocomplete autocomplete = new TSTAutocomplete();
+ internal readonly TSTAutocomplete autocomplete = new TSTAutocomplete(); // LUCENENET: marked readonly
///
/// Number of entries the lookup was built with
@@ -136,7 +136,7 @@ private static bool CharSeqEquals(string left, string right)
}
return true;
}
-
+
public override IList DoLookup(string key, IEnumerable contexts, bool onlyMorePopular, int num)
{
if (contexts != null)
@@ -307,4 +307,4 @@ public override long GetSizeInBytes()
public override long Count => count;
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.TestFramework/Analysis/MockCharFilter.cs b/src/Lucene.Net.TestFramework/Analysis/MockCharFilter.cs
index 442ff02053..3092bef73b 100644
--- a/src/Lucene.Net.TestFramework/Analysis/MockCharFilter.cs
+++ b/src/Lucene.Net.TestFramework/Analysis/MockCharFilter.cs
@@ -121,6 +121,6 @@ protected virtual void AddOffCorrectMap(int off, int cumulativeDiff)
corrections[off] = cumulativeDiff;
}
- internal JCG.SortedDictionary corrections = new JCG.SortedDictionary();
+ internal readonly JCG.SortedDictionary corrections = new JCG.SortedDictionary(); // LUCENENET: marked readonly
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.TestFramework/Codecs/Lucene3x/PreFlexRWFieldsWriter.cs b/src/Lucene.Net.TestFramework/Codecs/Lucene3x/PreFlexRWFieldsWriter.cs
index 320c16a6af..412a90887a 100644
--- a/src/Lucene.Net.TestFramework/Codecs/Lucene3x/PreFlexRWFieldsWriter.cs
+++ b/src/Lucene.Net.TestFramework/Codecs/Lucene3x/PreFlexRWFieldsWriter.cs
@@ -77,7 +77,7 @@ public PreFlexRWFieldsWriter(SegmentWriteState state)
}
}
- skipListWriter = new PreFlexRWSkipListWriter(termsOut.skipInterval, termsOut.maxSkipLevels, totalNumDocs, freqOut, proxOut);
+ skipListWriter = new PreFlexRWSkipListWriter(TermInfosWriter.skipInterval, TermInfosWriter.maxSkipLevels, totalNumDocs, freqOut, proxOut);
//System.out.println("\nw start seg=" + segment);
}
@@ -154,7 +154,7 @@ public override void StartDoc(int docID, int termDocFreq)
throw new CorruptIndexException("docs out of order (" + docID + " <= " + lastDocID + " )");
}
- if ((++df % outerInstance.outerInstance.termsOut.skipInterval) == 0)
+ if ((++df % TermInfosWriter.skipInterval) == 0)
{
outerInstance.outerInstance.skipListWriter.SetSkipData(lastDocID, outerInstance.storePayloads, lastPayloadLength);
outerInstance.outerInstance.skipListWriter.BufferSkip(df);
diff --git a/src/Lucene.Net.TestFramework/Codecs/Lucene3x/TermInfosWriter.cs b/src/Lucene.Net.TestFramework/Codecs/Lucene3x/TermInfosWriter.cs
index ec518d0958..51cb676081 100644
--- a/src/Lucene.Net.TestFramework/Codecs/Lucene3x/TermInfosWriter.cs
+++ b/src/Lucene.Net.TestFramework/Codecs/Lucene3x/TermInfosWriter.cs
@@ -72,13 +72,13 @@ internal sealed class TermInfosWriter : IDisposable
/// smaller values result in bigger indexes, less acceleration and more
/// accelerable cases. More detailed experiments would be useful here.
///
- internal int skipInterval = 16;
+ internal const int skipInterval = 16; // LUCENENET: marked const
///
/// Expert: The maximum number of skip levels. Smaller values result in
/// slightly smaller indexes, but slower skipping in big posting lists.
///
- internal int maxSkipLevels = 10;
+ internal const int maxSkipLevels = 10; // LUCENENET: marked const
private long lastIndexPointer;
private bool isIndex;
@@ -328,4 +328,4 @@ public void Dispose()
}
}
#pragma warning restore 612, 618
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.TestFramework/Codecs/Lucene42/Lucene42DocValuesConsumer.cs b/src/Lucene.Net.TestFramework/Codecs/Lucene42/Lucene42DocValuesConsumer.cs
index 0a9d12e9b9..0aa798649d 100644
--- a/src/Lucene.Net.TestFramework/Codecs/Lucene42/Lucene42DocValuesConsumer.cs
+++ b/src/Lucene.Net.TestFramework/Codecs/Lucene42/Lucene42DocValuesConsumer.cs
@@ -379,8 +379,8 @@ IEnumerator IEnumerable.GetEnumerator()
internal sealed class SortedSetEnumerator : IEnumerator
{
internal byte[] buffer = new byte[10];
- internal ByteArrayDataOutput @out = new ByteArrayDataOutput();
- internal BytesRef @ref = new BytesRef();
+ internal readonly ByteArrayDataOutput @out = new ByteArrayDataOutput(); // LUCENENET: marked readonly
+ internal readonly BytesRef @ref = new BytesRef(); // LUCENENET: marked readonly
internal readonly IEnumerator counts;
internal readonly IEnumerator ords;
diff --git a/src/Lucene.Net.TestFramework/Search/CheckHits.cs b/src/Lucene.Net.TestFramework/Search/CheckHits.cs
index 51dcf60320..c58264fa1e 100644
--- a/src/Lucene.Net.TestFramework/Search/CheckHits.cs
+++ b/src/Lucene.Net.TestFramework/Search/CheckHits.cs
@@ -485,10 +485,10 @@ public override TopDocs Search(Query query, Filter filter, int n)
///
public class ExplanationAsserter : ICollector
{
- internal Query q;
- internal IndexSearcher s;
- internal string d;
- internal bool deep;
+ internal readonly Query q; // LUCENENET: marked readonly
+ internal readonly IndexSearcher s; // LUCENENET: marked readonly
+ internal readonly string d; // LUCENENET: marked readonly
+ internal readonly bool deep; // LUCENENET: marked readonly
internal Scorer scorer;
internal int @base = 0;
diff --git a/src/Lucene.Net.TestFramework/Store/MockDirectoryWrapper.cs b/src/Lucene.Net.TestFramework/Store/MockDirectoryWrapper.cs
index 5986ea40e4..b245707817 100644
--- a/src/Lucene.Net.TestFramework/Store/MockDirectoryWrapper.cs
+++ b/src/Lucene.Net.TestFramework/Store/MockDirectoryWrapper.cs
@@ -102,7 +102,7 @@ public class MockDirectoryWrapper : BaseDirectoryWrapper
internal double randomIOExceptionRate;
internal double randomIOExceptionRateOnOpen;
- internal Random randomState;
+ internal readonly Random randomState; // LUCENENET: marked readonly
internal bool noDeleteOpenFile = true;
internal bool assertNoDeleteOpenFile = false;
internal bool preventDoubleWrite = true;
@@ -113,7 +113,7 @@ public class MockDirectoryWrapper : BaseDirectoryWrapper
private ISet unSyncedFiles;
private ISet createdFiles;
private ISet openFilesForWrite = new JCG.HashSet(StringComparer.Ordinal);
- internal ISet openLocks = new ConcurrentHashSet(StringComparer.Ordinal);
+ internal readonly ISet openLocks = new ConcurrentHashSet(StringComparer.Ordinal); // LUCENENET: marked readonly
internal volatile bool crashed;
private readonly ThrottledIndexOutput throttledOutput; // LUCENENET: marked readonly
private Throttling throttling = Throttling.SOMETIMES;
diff --git a/src/Lucene.Net.TestFramework/Store/MockIndexOutputWrapper.cs b/src/Lucene.Net.TestFramework/Store/MockIndexOutputWrapper.cs
index e55765efc1..609c3606d1 100644
--- a/src/Lucene.Net.TestFramework/Store/MockIndexOutputWrapper.cs
+++ b/src/Lucene.Net.TestFramework/Store/MockIndexOutputWrapper.cs
@@ -37,7 +37,7 @@ public class MockIndexOutputWrapper : IndexOutput
private bool first = true;
internal readonly string name;
- internal byte[] singleByte = new byte[1];
+ internal readonly byte[] singleByte = new byte[1]; // LUCENENET: marked readonly
///
/// Construct an empty output buffer.
diff --git a/src/Lucene.Net.TestFramework/Store/MockLockFactoryWrapper.cs b/src/Lucene.Net.TestFramework/Store/MockLockFactoryWrapper.cs
index 0a8e175488..2d0e5e7c35 100644
--- a/src/Lucene.Net.TestFramework/Store/MockLockFactoryWrapper.cs
+++ b/src/Lucene.Net.TestFramework/Store/MockLockFactoryWrapper.cs
@@ -23,8 +23,8 @@ namespace Lucene.Net.Store
///
public class MockLockFactoryWrapper : LockFactory
{
- internal MockDirectoryWrapper dir;
- internal LockFactory @delegate;
+ internal readonly MockDirectoryWrapper dir; // LUCENENET: marked readonly
+ internal readonly LockFactory @delegate; // LUCENENET: marked readonly
public MockLockFactoryWrapper(MockDirectoryWrapper dir, LockFactory @delegate)
{
@@ -58,8 +58,8 @@ private class MockLock : Lock
{
private readonly MockLockFactoryWrapper outerInstance;
- internal Lock delegateLock;
- internal string name;
+ internal readonly Lock delegateLock; // LUCENENET: marked readonly
+ internal readonly string name; // LUCENENET: marked readonly
internal MockLock(MockLockFactoryWrapper outerInstance, Lock @delegate, string name)
{
@@ -96,4 +96,4 @@ public override bool IsLocked()
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs b/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs
index 81b24930d5..a4ff76a940 100644
--- a/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs
+++ b/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs
@@ -557,7 +557,7 @@ public override string ToString()
}
internal readonly Outputs fstOutputs = ByteSequenceOutputs.Singleton;
- internal BytesRef NO_OUTPUT;
+ internal readonly BytesRef NO_OUTPUT; // LUCENENET: marked readonly
///
/// BlockTree's implementation of .
diff --git a/src/Lucene.Net/Codecs/Lucene3x/SegmentTermEnum.cs b/src/Lucene.Net/Codecs/Lucene3x/SegmentTermEnum.cs
index 936c92888a..8597cec240 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/SegmentTermEnum.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/SegmentTermEnum.cs
@@ -35,8 +35,8 @@ namespace Lucene.Net.Codecs.Lucene3x
internal sealed class SegmentTermEnum : IDisposable // LUCENENET specific: Not implementing ICloneable per Microsoft's recommendation
{
private IndexInput input;
- internal FieldInfos fieldInfos;
- internal long size;
+ internal readonly FieldInfos fieldInfos; // LUCENENET: marked readonly
+ internal readonly long size; // LUCENENET: marked readonly
internal long position = -1;
// Changed strings to true utf8 with length-in-bytes not
@@ -59,10 +59,10 @@ internal sealed class SegmentTermEnum : IDisposable // LUCENENET specific: Not i
private readonly int format; // LUCENENET: marked readonly
private readonly bool isIndex = false; // LUCENENET: marked readonly
internal long indexPointer = 0;
- internal int indexInterval; // LUCENENET NOTE: Changed from public field to internal (class is internal anyway)
- internal int skipInterval;
+ internal readonly int indexInterval; // LUCENENET NOTE: Changed from public field to internal (class is internal anyway), marked readonly
+ internal readonly int skipInterval; // LUCENENET: marked readonly
internal int newSuffixStart;
- internal int maxSkipLevels;
+ internal readonly int maxSkipLevels; // LUCENENET: marked readonly
private bool first = true;
public SegmentTermEnum(IndexInput i, FieldInfos fis, bool isi)
@@ -262,4 +262,4 @@ public void Dispose()
input.Dispose();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Codecs/Lucene3x/TermInfosReader.cs b/src/Lucene.Net/Codecs/Lucene3x/TermInfosReader.cs
index c08a988939..e2ff36dd5b 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/TermInfosReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/TermInfosReader.cs
@@ -74,7 +74,7 @@ public TermInfoAndOrd(TermInfo ti, long termOrd)
private class CloneableTerm : DoubleBarrelLRUCache.CloneableKey
{
- internal Term term;
+ internal readonly Term term; // LUCENENET: marked readonly
public CloneableTerm(Term t)
{
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsWriter.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsWriter.cs
index 3a2e6b2292..78ddd01cee 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsWriter.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsWriter.cs
@@ -191,7 +191,7 @@ public override void StartTerm(BytesRef term, int freq)
internal int lastOffset = 0;
internal int lastPayloadLength = -1; // force first payload to write its length
- internal BytesRef scratch = new BytesRef(); // used only by this optimized flush below
+ internal readonly BytesRef scratch = new BytesRef(); // used only by this optimized flush below // LUCENENET: marked readonly
public override void AddProx(int numProx, DataInput positions, DataInput offsets)
{
diff --git a/src/Lucene.Net/Document/Field.cs b/src/Lucene.Net/Document/Field.cs
index 36233835e0..8a2d27f80a 100644
--- a/src/Lucene.Net/Document/Field.cs
+++ b/src/Lucene.Net/Document/Field.cs
@@ -35,8 +35,8 @@ namespace Lucene.Net.Documents
///
/// Expert: directly create a field for a document. Most
- /// users should use one of the sugar subclasses: ,
- /// , , ,
+ /// users should use one of the sugar subclasses: ,
+ /// , , ,
/// , ,
/// , ,
/// , .
@@ -44,7 +44,7 @@ namespace Lucene.Net.Documents
/// A field is a section of a . Each field has three
/// parts: name, type and value. Values may be text
/// ( , or pre-analyzed ), binary
- /// ( ), or numeric ( , , , or ).
+ /// ( ), or numeric ( , , , or ).
/// Fields are optionally stored in the
/// index, so that they may be returned with hits on the document.
///
@@ -72,7 +72,7 @@ public partial class Field : IIndexableField, IFormattable
private object fieldsData;
///
- /// Field's value
+ /// Field's value
///
/// Setting this property will automatically set the backing field for the
/// property.
@@ -320,7 +320,7 @@ public virtual string GetStringValue() // LUCENENET specific: Added verb Get to
/// An object that supplies culture-specific formatting information. This parameter has no effect if this field is non-numeric.
/// The string representation of the value if it is either a or numeric type.
// LUCENENET specific overload.
- public virtual string GetStringValue(IFormatProvider provider)
+ public virtual string GetStringValue(IFormatProvider provider)
{
return GetStringValue(null, provider);
}
@@ -333,7 +333,7 @@ public virtual string GetStringValue(IFormatProvider provider)
/// A standard or custom numeric format string. This parameter has no effect if this field is non-numeric.
/// The string representation of the value if it is either a or numeric type.
// LUCENENET specific overload.
- public virtual string GetStringValue(string format)
+ public virtual string GetStringValue(string format)
{
return GetStringValue(format, null);
}
@@ -411,7 +411,7 @@ public virtual void SetStringValue(string value)
}
///
- /// Expert: change the value of this field. See
+ /// Expert: change the value of this field. See
/// .
///
public virtual void SetReaderValue(TextReader value)
@@ -532,7 +532,7 @@ public virtual void SetDoubleValue(double value)
// LUCENENET TODO: Add SetValue() overloads for each type?
// Upside: Simpler API.
- // Downside: Must be vigilant about what type is passed or the wrong overload will be called and will get a runtime exception.
+ // Downside: Must be vigilant about what type is passed or the wrong overload will be called and will get a runtime exception.
///
/// Expert: sets the token stream to be used for indexing and causes
@@ -602,15 +602,15 @@ public virtual object GetNumericValue() // LUCENENET specific: Added verb Get to
///
/// Gets the of the underlying value, or if the value is not set or non-numeric.
///
- /// Expert: The difference between this property and is
+ /// Expert: The difference between this property and is
/// this is represents the current state of the field (whether being written or read) and the
/// property represents instructions on how the field will be written,
/// but does not re-populate when reading back from an index (it is write-only).
///
- /// In Java, the numeric type was determined by checking the type of
+ /// In Java, the numeric type was determined by checking the type of
/// . However, since there are no reference number
/// types in .NET, using so will cause boxing/unboxing. It is
- /// therefore recommended to use this property to check the underlying type and the corresponding
+ /// therefore recommended to use this property to check the underlying type and the corresponding
/// Get*Value() method to retrieve the value.
///
/// NOTE: Since Lucene codecs do not support or ,
@@ -770,7 +770,7 @@ public override string ToString()
}
///
- /// Prints a for human consumption.
+ /// Prints a for human consumption.
///
/// A standard or custom numeric format string. This parameter has no effect if this field is non-numeric.
// LUCENENET specific - method added for better .NET compatibility
@@ -906,8 +906,8 @@ public virtual TokenStream GetTokenStream(Analyzer analyzer)
internal sealed class StringTokenStream : TokenStream
{
- internal ICharTermAttribute termAttribute;
- internal IOffsetAttribute offsetAttribute;
+ internal readonly ICharTermAttribute termAttribute; // LUCENENET: marked readonly
+ internal readonly IOffsetAttribute offsetAttribute; // LUCENENET: marked readonly
internal bool used = false;
internal string value = null;
@@ -985,7 +985,7 @@ public enum Store
//
///
- /// Specifies whether and how a field should be indexed.
+ /// Specifies whether and how a field should be indexed.
///
[Obsolete("This is here only to ease transition from the pre-4.0 APIs.")]
public enum Index
@@ -1035,13 +1035,13 @@ public enum Index
}
///
- /// Specifies whether and how a field should have term vectors.
+ /// Specifies whether and how a field should have term vectors.
///
[Obsolete("This is here only to ease transition from the pre-4.0 APIs.")]
public enum TermVector
{
///
- /// Do not store term vectors.
+ /// Do not store term vectors.
///
NO,
@@ -1197,7 +1197,7 @@ public Field(string name, TextReader reader)
}
///
- /// Create a tokenized and indexed field that is not stored, optionally with
+ /// Create a tokenized and indexed field that is not stored, optionally with
/// storing term vectors. The is read only when the is added to the index,
/// i.e. you may not close the until
/// has been called.
@@ -1229,7 +1229,7 @@ public Field(string name, TokenStream tokenStream)
}
///
- /// Create a tokenized and indexed field that is not stored, optionally with
+ /// Create a tokenized and indexed field that is not stored, optionally with
/// storing term vectors. This is useful for pre-analyzed fields.
/// The is read only when the is added to the index,
/// i.e. you may not close the until
@@ -1412,7 +1412,7 @@ public static Field.Index ToIndex(bool indexed, bool analyed)
}
[Obsolete("This is here only to ease transition from the pre-4.0 APIs.")]
- public static Field.Index ToIndex(bool indexed, bool analyzed, bool omitNorms)
+ public static Field.Index ToIndex(bool indexed, bool analyzed, bool omitNorms)
{
// If it is not indexed nothing else matters
if (!indexed)
@@ -1473,8 +1473,8 @@ public static Field.TermVector ToTermVector(bool stored, bool withOffsets, bool
// LUCENENET specific
// Since we have more numeric types on Field than on FieldType,
// a new enumeration was created for .NET. In Java, this type was
- // determined by checking the data type of the Field.numericValue()
- // method. However, since the corresponding GetNumericValue() method
+ // determined by checking the data type of the Field.numericValue()
+ // method. However, since the corresponding GetNumericValue() method
// in .NET returns type object (which would result in boxing/unboxing),
// this has been refactored to use an enumeration instead, which makes the
// API easier to use.
@@ -1511,8 +1511,8 @@ public enum NumericFieldType
SINGLE,
///
- /// 64-bit double numeric type
+ /// 64-bit double numeric type
///
DOUBLE
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Index/ConcurrentMergeScheduler.cs b/src/Lucene.Net/Index/ConcurrentMergeScheduler.cs
index b506af1e98..c1f55341c8 100644
--- a/src/Lucene.Net/Index/ConcurrentMergeScheduler.cs
+++ b/src/Lucene.Net/Index/ConcurrentMergeScheduler.cs
@@ -293,7 +293,7 @@ protected virtual void UpdateMergeThreads()
/// conjunction with , like that:
///
///
- /// if (IsVerbose)
+ /// if (IsVerbose)
/// {
/// Message("your message");
/// }
@@ -571,8 +571,8 @@ protected internal class MergeThread : ThreadJob
{
private readonly ConcurrentMergeScheduler outerInstance;
- internal IndexWriter tWriter;
- internal MergePolicy.OneMerge startMerge;
+ internal readonly IndexWriter tWriter; // LUCENENET: marked readonly
+ internal readonly MergePolicy.OneMerge startMerge; // LUCENENET: marked readonly
internal MergePolicy.OneMerge runningMerge;
private volatile bool done;
@@ -616,7 +616,7 @@ public virtual MergePolicy.OneMerge RunningMerge
}
///
- /// Return the current merge, or null if this
+ /// Return the current merge, or null if this
/// is done.
///
public virtual MergePolicy.OneMerge CurrentMerge
@@ -814,4 +814,4 @@ public override object Clone()
return clone;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Index/DocumentsWriterPerThread.cs b/src/Lucene.Net/Index/DocumentsWriterPerThread.cs
index e71af22fe9..6ac584bbdf 100644
--- a/src/Lucene.Net/Index/DocumentsWriterPerThread.cs
+++ b/src/Lucene.Net/Index/DocumentsWriterPerThread.cs
@@ -105,7 +105,7 @@ public class DocState
{
internal readonly DocumentsWriterPerThread docWriter;
internal Analyzer analyzer;
- internal InfoStream infoStream;
+ internal readonly InfoStream infoStream; // LUCENENET: marked readonly
internal Similarity similarity;
internal int docID;
internal IEnumerable doc;
diff --git a/src/Lucene.Net/Index/FreqProxTermsWriterPerField.cs b/src/Lucene.Net/Index/FreqProxTermsWriterPerField.cs
index 3fd3db8889..de5a18a812 100644
--- a/src/Lucene.Net/Index/FreqProxTermsWriterPerField.cs
+++ b/src/Lucene.Net/Index/FreqProxTermsWriterPerField.cs
@@ -328,11 +328,11 @@ public FreqProxPostingsArray(int size, bool writeFreqs, bool writeProx, bool wri
//System.out.println("PA init freqs=" + writeFreqs + " pos=" + writeProx + " offs=" + writeOffsets);
}
- internal int[] termFreqs; // # times this term occurs in the current doc
- internal int[] lastDocIDs; // Last docID where this term occurred
- internal int[] lastDocCodes; // Code for prior doc
- internal int[] lastPositions; // Last position where this term occurred
- internal int[] lastOffsets; // Last endOffset where this term occurred
+ internal readonly int[] termFreqs; // # times this term occurs in the current doc // LUCENENET: marked readonly
+ internal readonly int[] lastDocIDs; // Last docID where this term occurred // LUCENENET: marked readonly
+ internal readonly int[] lastDocCodes; // Code for prior doc // LUCENENET: marked readonly
+ internal readonly int[] lastPositions; // Last position where this term occurred // LUCENENET: marked readonly
+ internal readonly int[] lastOffsets; // Last endOffset where this term occurred // LUCENENET: marked readonly
internal override ParallelPostingsArray NewInstance(int size)
{
diff --git a/src/Lucene.Net/Index/FrozenBufferedUpdates.cs b/src/Lucene.Net/Index/FrozenBufferedUpdates.cs
index b9968577c1..7afdee629a 100644
--- a/src/Lucene.Net/Index/FrozenBufferedUpdates.cs
+++ b/src/Lucene.Net/Index/FrozenBufferedUpdates.cs
@@ -40,7 +40,7 @@ internal class FrozenBufferedUpdates
/// Terms, in sorted order:
internal readonly PrefixCodedTerms terms;
- internal int termCount; // just for debugging
+ internal readonly int termCount; // just for debugging // LUCENENET: marked readonly
/// Parallel array of deleted query, and the docIDUpto for each
internal readonly Query[] queries;
@@ -252,4 +252,4 @@ public virtual bool Any()
return termCount > 0 || queries.Length > 0 || numericDVUpdates.Length > 0 || binaryDVUpdates.Length > 0;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Index/IndexFileDeleter.cs b/src/Lucene.Net/Index/IndexFileDeleter.cs
index 4ad0b7c235..f412ac5998 100644
--- a/src/Lucene.Net/Index/IndexFileDeleter.cs
+++ b/src/Lucene.Net/Index/IndexFileDeleter.cs
@@ -750,12 +750,12 @@ public int DecRef()
private sealed class CommitPoint : IndexCommit
{
- internal ICollection files;
- internal string segmentsFileName;
+ internal readonly ICollection files; // LUCENENET: marked readonly
+ internal readonly string segmentsFileName; // LUCENENET: marked readonly
internal bool deleted;
- internal Directory directory;
- internal ICollection commitsToDelete;
- internal long generation;
+ internal readonly Directory directory; // LUCENENET: marked readonly
+ internal readonly ICollection commitsToDelete; // LUCENENET: marked readonly
+ internal readonly long generation; // LUCENENET: marked readonly
internal readonly IDictionary userData;
internal readonly int segmentCount;
diff --git a/src/Lucene.Net/Index/IndexWriter.cs b/src/Lucene.Net/Index/IndexWriter.cs
index f98cdd709f..be24ca1a11 100644
--- a/src/Lucene.Net/Index/IndexWriter.cs
+++ b/src/Lucene.Net/Index/IndexWriter.cs
@@ -271,7 +271,7 @@ public class IndexWriter : IDisposable, ITwoPhaseCommit
internal readonly AtomicInt32 flushCount = new AtomicInt32();
internal readonly AtomicInt32 flushDeletesCount = new AtomicInt32();
- internal ReaderPool readerPool;
+ internal readonly ReaderPool readerPool; // LUCENENET: marked readonly
internal readonly BufferedUpdatesStream bufferedUpdatesStream;
// this is a "write once" variable (like the organic dye
diff --git a/src/Lucene.Net/Index/StandardDirectoryReader.cs b/src/Lucene.Net/Index/StandardDirectoryReader.cs
index 19776dd153..e22a17f94d 100644
--- a/src/Lucene.Net/Index/StandardDirectoryReader.cs
+++ b/src/Lucene.Net/Index/StandardDirectoryReader.cs
@@ -506,10 +506,10 @@ public override IndexCommit IndexCommit
internal sealed class ReaderCommit : IndexCommit
{
- internal string segmentsFileName;
- internal ICollection files;
- internal Directory dir;
- internal long generation;
+ internal readonly string segmentsFileName; // LUCENENET: marked readonly
+ internal readonly ICollection files; // LUCENENET: marked readonly
+ internal readonly Directory dir; // LUCENENET: marked readonly
+ internal readonly long generation; // LUCENENET: marked readonly
internal readonly IDictionary userData;
internal readonly int segmentCount;
@@ -548,4 +548,4 @@ public override void Delete()
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Index/TermVectorsConsumerPerField.cs b/src/Lucene.Net/Index/TermVectorsConsumerPerField.cs
index 87d43314d6..64631edb3c 100644
--- a/src/Lucene.Net/Index/TermVectorsConsumerPerField.cs
+++ b/src/Lucene.Net/Index/TermVectorsConsumerPerField.cs
@@ -146,7 +146,7 @@ internal override bool Start(IIndexableField[] fields, int count)
/// Called once per field per document if term vectors
/// are enabled, to write the vectors to
/// RAMOutputStream, which is then quickly flushed to
- /// the real term vectors files in the Directory.
+ /// the real term vectors files in the Directory.
///
internal override void Finish()
{
@@ -330,9 +330,9 @@ public TermVectorsPostingsArray(int size)
lastPositions = new int[size];
}
- internal int[] freqs; // How many times this term occurred in the current doc
- internal int[] lastOffsets; // Last offset we saw
- internal int[] lastPositions; // Last position where this term occurred
+ internal readonly int[] freqs; // How many times this term occurred in the current doc // LUCENENET: marked readonly
+ internal readonly int[] lastOffsets; // Last offset we saw // LUCENENET: marked readonly
+ internal readonly int[] lastPositions; // Last position where this term occurred // LUCENENET: marked readonly
internal override ParallelPostingsArray NewInstance(int size)
{
@@ -357,4 +357,4 @@ internal override int BytesPerPosting()
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Search/BooleanScorer2.cs b/src/Lucene.Net/Search/BooleanScorer2.cs
index 7fa1c89fb3..fffdf5165b 100644
--- a/src/Lucene.Net/Search/BooleanScorer2.cs
+++ b/src/Lucene.Net/Search/BooleanScorer2.cs
@@ -114,7 +114,7 @@ private class SingleMatchScorer : Scorer
{
private readonly BooleanScorer2 outerInstance;
- internal Scorer scorer;
+ internal readonly Scorer scorer; // LUCENENET: marked readonly
internal int lastScoredDoc = -1;
// Save the score of lastScoredDoc, so that we don't compute it more than
@@ -274,7 +274,7 @@ private Scorer DualConjunctionSumScorer(/* bool disableCoord, // LUCENENET: Not
///
private Scorer MakeCountingSumScorer(/* bool disableCoord // LUCENENET: Not Referenced */) // each scorer counted as a single matcher
{
- return (requiredScorers.Count == 0)
+ return (requiredScorers.Count == 0)
? MakeCountingSumScorerNoReq(/* disableCoord // LUCENENET: Not Referenced */)
: MakeCountingSumScorerSomeReq(/* disableCoord // LUCENENET: Not Referenced */);
}
@@ -376,4 +376,4 @@ public override ICollection GetChildren()
return children;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Search/ConstantScoreAutoRewrite.cs b/src/Lucene.Net/Search/ConstantScoreAutoRewrite.cs
index 083c9d3b60..0f2dd95b9c 100644
--- a/src/Lucene.Net/Search/ConstantScoreAutoRewrite.cs
+++ b/src/Lucene.Net/Search/ConstantScoreAutoRewrite.cs
@@ -178,7 +178,7 @@ public override bool Collect(BytesRef bytes)
internal readonly int docCountCutoff, termCountLimit;
internal readonly TermStateByteStart array = new TermStateByteStart(16);
- internal BytesRefHash pendingTerms;
+ internal readonly BytesRefHash pendingTerms; // LUCENENET: marked readonly
}
public override int GetHashCode()
diff --git a/src/Lucene.Net/Search/ExactPhraseScorer.cs b/src/Lucene.Net/Search/ExactPhraseScorer.cs
index bcc7aaa600..ed33290c1a 100644
--- a/src/Lucene.Net/Search/ExactPhraseScorer.cs
+++ b/src/Lucene.Net/Search/ExactPhraseScorer.cs
@@ -33,7 +33,7 @@ internal sealed class ExactPhraseScorer : Scorer
private readonly int[] counts = new int[CHUNK];
private readonly int[] gens = new int[CHUNK];
- internal bool noDocs;
+ internal readonly bool noDocs; // LUCENENET: marked readonly
private readonly long cost;
private sealed class ChunkState
@@ -358,4 +358,4 @@ public override long GetCost()
return cost;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Search/FieldCacheImpl.cs b/src/Lucene.Net/Search/FieldCacheImpl.cs
index ef25545cae..b038c5c377 100644
--- a/src/Lucene.Net/Search/FieldCacheImpl.cs
+++ b/src/Lucene.Net/Search/FieldCacheImpl.cs
@@ -290,7 +290,7 @@ private protected Cache(FieldCacheImpl wrapper) // LUCENENET: Changed from inter
internal readonly FieldCacheImpl wrapper;
- internal ConditionalWeakTable> readerCache = new ConditionalWeakTable>();
+ internal readonly ConditionalWeakTable> readerCache = new ConditionalWeakTable>(); // LUCENENET: marked readonly
protected abstract TValue CreateValue(AtomicReader reader, TKey key, bool setDocsWithField);
diff --git a/src/Lucene.Net/Search/Payloads/PayloadNearQuery.cs b/src/Lucene.Net/Search/Payloads/PayloadNearQuery.cs
index 816dc404dd..558b27f9ed 100644
--- a/src/Lucene.Net/Search/Payloads/PayloadNearQuery.cs
+++ b/src/Lucene.Net/Search/Payloads/PayloadNearQuery.cs
@@ -210,7 +210,7 @@ public class PayloadNearSpanScorer : SpanScorer
{
private readonly PayloadNearQuery outerInstance;
- internal Spans spans;
+ internal readonly Spans spans; // LUCENENET: marked readonly
protected internal float m_payloadScore;
internal int payloadsSeen;
@@ -251,7 +251,7 @@ public virtual void GetPayloads(Spans[] subSpans)
}
// TODO change the whole spans api to use bytesRef, or nuke spans
- internal BytesRef scratch = new BytesRef();
+ internal readonly BytesRef scratch = new BytesRef(); // LUCENENET: marked readonly
///
/// By default, uses the to score the payloads, but
diff --git a/src/Lucene.Net/Search/PhrasePositions.cs b/src/Lucene.Net/Search/PhrasePositions.cs
index d90d4b4e4a..a9bd7b7e5d 100644
--- a/src/Lucene.Net/Search/PhrasePositions.cs
+++ b/src/Lucene.Net/Search/PhrasePositions.cs
@@ -27,7 +27,7 @@ internal sealed class PhrasePositions
internal int doc; // current doc
internal int position; // position in doc
internal int count; // remaining pos in this doc
- internal int offset; // position in phrase
+ internal readonly int offset; // position in phrase // LUCENENET: marked readonly
internal readonly int ord; // unique across all PhrasePositions instances
internal readonly DocsAndPositionsEnum postings; // stream of docs & positions
internal PhrasePositions next; // used to make lists
@@ -100,4 +100,4 @@ public override string ToString()
return s;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Search/PhraseQuery.cs b/src/Lucene.Net/Search/PhraseQuery.cs
index b579477448..cc0f1e0164 100644
--- a/src/Lucene.Net/Search/PhraseQuery.cs
+++ b/src/Lucene.Net/Search/PhraseQuery.cs
@@ -304,7 +304,7 @@ private class PhraseWeight : Weight
internal readonly Similarity.SimWeight stats;
- internal TermContext[] states;
+ internal readonly TermContext[] states; // LUCENENET: marked readonly
public PhraseWeight(PhraseQuery outerInstance, IndexSearcher searcher)
{
diff --git a/src/Lucene.Net/Search/ScoringRewrite.cs b/src/Lucene.Net/Search/ScoringRewrite.cs
index 31e4996fd7..cd90ba3a84 100644
--- a/src/Lucene.Net/Search/ScoringRewrite.cs
+++ b/src/Lucene.Net/Search/ScoringRewrite.cs
@@ -147,7 +147,7 @@ public ParallelArraysTermCollector(ScoringRewrite outerInstance)
}
internal readonly TermFreqBoostByteStart array = new TermFreqBoostByteStart(16);
- internal BytesRefHash terms;
+ internal readonly BytesRefHash terms; // LUCENENET: marked readonly
internal TermsEnum termsEnum;
private IBoostAttribute boostAtt;
diff --git a/src/Lucene.Net/Search/SortField.cs b/src/Lucene.Net/Search/SortField.cs
index 299ad948cf..4dd0980558 100644
--- a/src/Lucene.Net/Search/SortField.cs
+++ b/src/Lucene.Net/Search/SortField.cs
@@ -48,7 +48,7 @@ public class SortField
private string field;
private SortFieldType type; // defaults to determining type dynamically
- internal bool reverse = false; // defaults to natural order
+ internal readonly bool reverse /*= false*/; // defaults to natural order // LUCENENET: marked readonly
private readonly FieldCache.IParser parser; // LUCENENET: marked readonly
// Used for CUSTOM sort
diff --git a/src/Lucene.Net/Search/Spans/NearSpansUnordered.cs b/src/Lucene.Net/Search/Spans/NearSpansUnordered.cs
index ef81809b2b..ab60f8c09a 100644
--- a/src/Lucene.Net/Search/Spans/NearSpansUnordered.cs
+++ b/src/Lucene.Net/Search/Spans/NearSpansUnordered.cs
@@ -80,7 +80,7 @@ private class SpansCell : Spans
{
private readonly NearSpansUnordered outerInstance;
- internal Spans spans;
+ internal readonly Spans spans; // LUCENENET: marked readonly
internal SpansCell next;
private int length = -1;
private readonly int index; // LUCENENET: marked readonly
@@ -384,4 +384,4 @@ private void ListToQueue()
private bool AtMatch => (Min.Doc == max.Doc) && ((max.End - Min.Start - totalLength) <= slop);
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Store/SimpleFSLockFactory.cs b/src/Lucene.Net/Store/SimpleFSLockFactory.cs
index 29733cd43b..27e0932b87 100644
--- a/src/Lucene.Net/Store/SimpleFSLockFactory.cs
+++ b/src/Lucene.Net/Store/SimpleFSLockFactory.cs
@@ -22,8 +22,8 @@ namespace Lucene.Net.Store
*/
///
- /// Implements using
- ///
+ /// Implements using
+ ///
/// (writes the file with UTF8 encoding and no byte order mark).
///
/// Special care needs to be taken if you change the locking
@@ -36,7 +36,7 @@ namespace Lucene.Net.Store
///
/// If you suspect that this or any other is
/// not working properly in your environment, you can easily
- /// test it by using ,
+ /// test it by using ,
/// and .
///
///
@@ -103,8 +103,8 @@ public override void ClearLock(string lockName)
internal class SimpleFSLock : Lock
{
- internal FileInfo lockFile;
- internal DirectoryInfo lockDir;
+ internal readonly FileInfo lockFile; // LUCENENET: marked readonly
+ internal readonly DirectoryInfo lockDir; // LUCENENET: marked readonly
public SimpleFSLock(DirectoryInfo lockDir, string lockFileName)
{
@@ -184,4 +184,4 @@ public override string ToString()
return "SimpleFSLock@" + lockFile;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Store/SingleInstanceLockFactory.cs b/src/Lucene.Net/Store/SingleInstanceLockFactory.cs
index f78bc0b54a..ba411b7337 100644
--- a/src/Lucene.Net/Store/SingleInstanceLockFactory.cs
+++ b/src/Lucene.Net/Store/SingleInstanceLockFactory.cs
@@ -61,7 +61,7 @@ public override void ClearLock(string lockName)
internal class SingleInstanceLock : Lock
{
- internal string lockName;
+ internal readonly string lockName; // LUCENENET: marked readonly
private readonly JCG.HashSet<string> locks;
public SingleInstanceLock(JCG.HashSet<string> locks, string lockName)
@@ -117,4 +117,4 @@ public override string ToString()
return base.ToString() + ": " + lockName;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Util/Automaton/LevenshteinAutomata.cs b/src/Lucene.Net/Util/Automaton/LevenshteinAutomata.cs
index 48a8b1cd51..8ef25fd129 100644
--- a/src/Lucene.Net/Util/Automaton/LevenshteinAutomata.cs
+++ b/src/Lucene.Net/Util/Automaton/LevenshteinAutomata.cs
@@ -48,9 +48,9 @@ public class LevenshteinAutomata
/* the ranges outside of alphabet */
internal readonly int[] rangeLower;
internal readonly int[] rangeUpper;
- internal int numRanges = 0;
+ internal readonly int numRanges /*= 0*/; // LUCENENET: marked readonly
- internal ParametricDescription[] descriptions;
+ internal readonly ParametricDescription[] descriptions; // LUCENENET: marked readonly
///
/// Create a new for some string.
@@ -338,4 +338,4 @@ protected internal virtual int Unpack(long[] data, int index, int bitsPerValue)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Util/Automaton/RegExp.cs b/src/Lucene.Net/Util/Automaton/RegExp.cs
index 1e7a6c975b..92c7bb5dc8 100644
--- a/src/Lucene.Net/Util/Automaton/RegExp.cs
+++ b/src/Lucene.Net/Util/Automaton/RegExp.cs
@@ -108,7 +108,7 @@ public enum RegExpSyntax
///
///
///
- ///
+ ///
/// -
///
unionexp
/// ::=
@@ -123,7 +123,7 @@ public enum RegExpSyntax
///
///
///
- ///
+ ///
/// -
///
interexp
/// ::=
@@ -138,7 +138,7 @@ public enum RegExpSyntax
///
///
///
- ///
+ ///
/// -
///
concatexp
/// ::=
@@ -153,7 +153,7 @@ public enum RegExpSyntax
///
///
///
- ///
+ ///
/// -
///
repeatexp
/// ::=
@@ -203,7 +203,7 @@ public enum RegExpSyntax
///
///
///
- ///
+ ///
/// -
///
complexp
/// ::=
@@ -218,7 +218,7 @@ public enum RegExpSyntax
///
///
///
- ///
+ ///
/// -
///
charclassexp
/// ::=
@@ -240,7 +240,7 @@ public enum RegExpSyntax
///
///
///
- ///
+ ///
/// -
///
charclasses
/// ::=
@@ -255,7 +255,7 @@ public enum RegExpSyntax
///
///
///
- ///
+ ///
/// -
///
charclass
/// ::=
@@ -270,7 +270,7 @@ public enum RegExpSyntax
///
///
///
- ///
+ ///
/// -
///
simpleexp
/// ::=
@@ -334,7 +334,7 @@ public enum RegExpSyntax
/// (numerical interval)
/// [OPTIONAL]
///
- ///
+ ///
/// -
///
charexp
/// ::=
@@ -349,9 +349,9 @@ public enum RegExpSyntax
/// (a single character)
///
///
- ///
+ ///
///
- ///
+ ///
///
/// The productions marked [OPTIONAL] are only allowed if
/// specified by the syntax flags passed to the constructor.
@@ -402,8 +402,8 @@ internal enum Kind
internal int min, max, digits;
internal int from, to;
- internal string b;
- internal RegExpSyntax flags;
+ internal readonly string b; // LUCENENET: marked readonly
+ internal readonly RegExpSyntax flags; // LUCENENET: marked readonly
internal int pos;
internal RegExp()
@@ -1336,4 +1336,4 @@ internal int ParseCharExp()
return Next();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Util/Automaton/SortedIntSet.cs b/src/Lucene.Net/Util/Automaton/SortedIntSet.cs
index 673a8fda5f..8fc175131f 100644
--- a/src/Lucene.Net/Util/Automaton/SortedIntSet.cs
+++ b/src/Lucene.Net/Util/Automaton/SortedIntSet.cs
@@ -264,9 +264,9 @@ public override string ToString()
///
public struct FrozenInt32Set : IEquatable<FrozenInt32Set>
{
- internal int[] values;
- internal int hashCode;
- internal State state;
+ internal readonly int[] values; // LUCENENET: marked readonly
+ internal readonly int hashCode; // LUCENENET: marked readonly
+ internal readonly State state; // LUCENENET: marked readonly
public FrozenInt32Set(int[] values, int hashCode, State state)
{
@@ -353,4 +353,4 @@ public override string ToString()
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Util/Automaton/State.cs b/src/Lucene.Net/Util/Automaton/State.cs
index ccc5ca5aff..20ea792de1 100644
--- a/src/Lucene.Net/Util/Automaton/State.cs
+++ b/src/Lucene.Net/Util/Automaton/State.cs
@@ -57,7 +57,7 @@ public class State : IComparable
internal int number;
- internal int id;
+ internal readonly int id; // LUCENENET: marked readonly
internal static int next_id;
///
diff --git a/src/Lucene.Net/Util/Automaton/StatePair.cs b/src/Lucene.Net/Util/Automaton/StatePair.cs
index 79d1e168ff..7775352f0d 100644
--- a/src/Lucene.Net/Util/Automaton/StatePair.cs
+++ b/src/Lucene.Net/Util/Automaton/StatePair.cs
@@ -39,8 +39,8 @@ namespace Lucene.Net.Util.Automaton
public class StatePair
{
internal State s;
- internal State s1;
- internal State s2;
+ internal readonly State s1; // LUCENENET: marked readonly
+ internal readonly State s2; // LUCENENET: marked readonly
internal StatePair(State s, State s1, State s2)
{
diff --git a/src/Lucene.Net/Util/Automaton/UTF32ToUTF8.cs b/src/Lucene.Net/Util/Automaton/UTF32ToUTF8.cs
index c5c8c84bf1..208c15d061 100644
--- a/src/Lucene.Net/Util/Automaton/UTF32ToUTF8.cs
+++ b/src/Lucene.Net/Util/Automaton/UTF32ToUTF8.cs
@@ -392,4 +392,4 @@ private State NewUTF8State()
return s;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Util/CollectionUtil.cs b/src/Lucene.Net/Util/CollectionUtil.cs
index 71c548096c..95d443b168 100644
--- a/src/Lucene.Net/Util/CollectionUtil.cs
+++ b/src/Lucene.Net/Util/CollectionUtil.cs
@@ -34,7 +34,7 @@ public static class CollectionUtil // LUCENENET specific - made static
private sealed class ListIntroSorter : IntroSorter
{
internal T pivot;
- internal IList<T> list;
+ internal readonly IList<T> list; // LUCENENET: marked readonly
internal readonly IComparer<T> comp;
internal ListIntroSorter(IList<T> list, IComparer<T> comp)
@@ -45,7 +45,7 @@ internal ListIntroSorter(IList list, IComparer comp)
//{
// throw new ArgumentException("CollectionUtil can only sort random access lists in-place.");
//}
-
+
this.list = list;
this.comp = comp;
}
@@ -77,7 +77,7 @@ protected override int ComparePivot(int j)
private sealed class ListTimSorter : TimSorter
{
- internal IList<T> list;
+ internal readonly IList<T> list; // LUCENENET: marked readonly
internal readonly IComparer<T> comp;
internal readonly T[] tmp;
@@ -144,7 +144,7 @@ protected override int CompareSaved(int i, int j)
///
/// Sorts the given using the .
/// This method uses the intro sort
- /// algorithm, but falls back to insertion sort for small lists.
+ /// algorithm, but falls back to insertion sort for small lists.
///
/// This
/// The to use for the sort.
@@ -161,7 +161,7 @@ public static void IntroSort(IList list, IComparer comp)
///
/// Sorts the given random access in natural order.
/// This method uses the intro sort
- /// algorithm, but falls back to insertion sort for small lists.
+ /// algorithm, but falls back to insertion sort for small lists.
///
/// This
public static void IntroSort(IList list)
@@ -211,4 +211,4 @@ public static void TimSort(IList list)
TimSort(list, ArrayUtil.GetNaturalComparer());
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Util/IndexableBinaryStringTools.cs b/src/Lucene.Net/Util/IndexableBinaryStringTools.cs
index 68e8f5c122..c63632fcbd 100644
--- a/src/Lucene.Net/Util/IndexableBinaryStringTools.cs
+++ b/src/Lucene.Net/Util/IndexableBinaryStringTools.cs
@@ -299,8 +299,8 @@ public static void Decode(char[] inputArray, int inputOffset, int inputLength, s
internal class CodingCase
{
- internal int numBytes, initialShift, middleShift, finalShift, advanceBytes = 2;
- internal short middleMask, finalMask;
+ internal readonly int numBytes, initialShift, middleShift, finalShift, advanceBytes = 2; // LUCENENET: marked readonly
+ internal readonly short middleMask, finalMask; // LUCENENET: marked readonly
internal CodingCase(int initialShift, int middleShift, int finalShift)
{
diff --git a/src/Lucene.Net/Util/Packed/AbstractAppendingLongBuffer.cs b/src/Lucene.Net/Util/Packed/AbstractAppendingLongBuffer.cs
index 61578feb4c..64d10bb0c2 100644
--- a/src/Lucene.Net/Util/Packed/AbstractAppendingLongBuffer.cs
+++ b/src/Lucene.Net/Util/Packed/AbstractAppendingLongBuffer.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Util.Packed
*/
///
- /// Common functionality shared by and .
+ /// Common functionality shared by and .
///
/// NOTE: This was AbstractAppendingLongBuffer in Lucene
///
@@ -40,7 +40,7 @@ public abstract class AbstractAppendingInt64Buffer : Int64Values // LUCENENET NO
internal int valuesOff;
internal long[] pending;
internal int pendingOff;
- internal float acceptableOverheadRatio;
+ internal readonly float acceptableOverheadRatio; // LUCENENET: marked readonly
private protected AbstractAppendingInt64Buffer(int initialBlockCount, int pageSize, float acceptableOverheadRatio) // LUCENENET: Changed from internal to private protected
{
@@ -142,7 +142,7 @@ public int Get(long index, long[] arr, int off, int len)
internal abstract int Get(int block, int element, long[] arr, int off, int len);
///
- /// Return an iterator over the values of this buffer.
+ /// Return an iterator over the values of this buffer.
///
public virtual Iterator GetIterator()
{
@@ -220,11 +220,11 @@ public long Next()
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal virtual long BaseRamBytesUsed()
{
- return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
- + 2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF
- + 2 * RamUsageEstimator.NUM_BYTES_INT32
- + 2 * RamUsageEstimator.NUM_BYTES_INT32
- + RamUsageEstimator.NUM_BYTES_SINGLE
+ return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
+ + 2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF
+ + 2 * RamUsageEstimator.NUM_BYTES_INT32
+ + 2 * RamUsageEstimator.NUM_BYTES_INT32
+ + RamUsageEstimator.NUM_BYTES_SINGLE
+ RamUsageEstimator.NUM_BYTES_INT64; // valuesBytes - acceptable overhead - pageShift, pageMask - the 2 offsets - the 2 arrays
}
@@ -256,4 +256,4 @@ public virtual void Freeze()
pending = null;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Util/PriorityQueue.cs b/src/Lucene.Net/Util/PriorityQueue.cs
index 6f3e680b55..5f3f74e97d 100644
--- a/src/Lucene.Net/Util/PriorityQueue.cs
+++ b/src/Lucene.Net/Util/PriorityQueue.cs
@@ -128,6 +128,7 @@ public abstract class PriorityComparer : IComparer
///
/// @lucene.internal
///
+ [SuppressMessage("ReSharper", "FieldCanBeMadeReadOnly.Local")]
public ref struct ValuePriorityQueue
{
private int size;
diff --git a/src/Lucene.Net/Util/RollingBuffer.cs b/src/Lucene.Net/Util/RollingBuffer.cs
index 519646e039..93f0e10e48 100644
--- a/src/Lucene.Net/Util/RollingBuffer.cs
+++ b/src/Lucene.Net/Util/RollingBuffer.cs
@@ -72,7 +72,7 @@ public abstract class RollingBuffer
// How many valid Position are held in the
// array:
private int count;
- private IRollingBufferItemFactory itemFactory;
+ private readonly IRollingBufferItemFactory itemFactory; // LUCENENET: marked readonly
protected RollingBuffer(IRollingBufferItemFactory itemFactory)
{
@@ -187,4 +187,4 @@ public virtual void FreeBefore(int pos)
count -= toFree;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net/Util/TimSorter.cs b/src/Lucene.Net/Util/TimSorter.cs
index bbc9525d8a..35553b3362 100644
--- a/src/Lucene.Net/Util/TimSorter.cs
+++ b/src/Lucene.Net/Util/TimSorter.cs
@@ -52,7 +52,7 @@ public abstract class TimSorter : Sorter
internal int minRun;
internal int to;
internal int stackSize;
- internal int[] runEnds;
+ internal readonly int[] runEnds; // LUCENENET: marked readonly
///
/// Create a new .