trait

com.johnsnowlabs.ml.tensorflow

MedicalClassification

trait MedicalClassification extends AnyRef

Ordering
  1. Alphabetic
  2. By Inheritance
Inherited
  1. MedicalClassification
  2. AnyRef
  3. Any
  1. Hide All
  2. Show All
Visibility
  1. Public
  2. All

Abstract Value Members

  1. abstract def findIndexedToken(tokenizedSentences: Seq[TokenizedSentence], sentence: (WordpieceTokenizedSentence, Int), tokenPiece: TokenPiece): Option[IndexedToken]
  2. abstract def tag(batch: Seq[Array[Int]], useTokenTypes: Boolean = true): Seq[Array[Array[Float]]]
  3. abstract def tagSequence(batch: Seq[Array[Int]], useTokenTypes: Boolean = true): Array[Array[Float]]
  4. abstract def tokenizeWithAlignment(sentences: Seq[TokenizedSentence], maxSeqLength: Int, caseSensitive: Boolean): Seq[WordpieceTokenizedSentence]

Concrete Value Members

  1. def calculateSoftmax(scores: Array[Float]): Array[Float]
  2. def encode(sentences: Seq[(WordpieceTokenizedSentence, Int)], maxSequenceLength: Int): Seq[Array[Int]]

    Encode the input sequence to indexes IDs adding padding where necessary

  3. def predict(tokenizedSentences: Seq[TokenizedSentence], batchSize: Int, maxSentenceLength: Int, caseSensitive: Boolean, tags: Map[String, Int], useTokenTypes: Boolean = true): Seq[Annotation]
  4. def predictSequence(tokenizedSentences: Seq[TokenizedSentence], sentences: Seq[Sentence], batchSize: Int, maxSentenceLength: Int, caseSensitive: Boolean, coalesceSentences: Boolean = false, tags: Map[String, Int], useTokenTypes: Boolean = true): Seq[Annotation]
  5. def wordAndSpanLevelAlignmentWithTokenizer(tokenLogits: Array[Array[Float]], tokenizedSentences: Seq[TokenizedSentence], sentence: (WordpieceTokenizedSentence, Int), tags: Map[String, Int]): Seq[Annotation]

    Word-level and span-level alignment with Tokenizer https://github.com/google-research/bert#tokenization

    ### Input
    orig_tokens = ["John", "Johanson", "'s", "house"]
    labels = ["NNP", "NNP", "POS", "NN"]

    # bert_tokens == ["[CLS]", "john", "johan", "##son", "'", "s", "house", "[SEP]"]
    # orig_to_tok_map == [1, 2, 4, 6]