com.johnsnowlabs.nlp.annotators.seq2seq
MedicalEncoderDecoder
Companion object MedicalEncoderDecoder
class MedicalEncoderDecoder extends AnnotatorModel[MedicalEncoderDecoder] with HasCaseSensitiveProperties with HasBatchedAnnotate[MedicalEncoderDecoder] with ParamsAndFeaturesWritable with WriteTensorflowModel with WriteOnnxModel with HasEngine with WriteSentencePieceModel with CheckLicense
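A minimal end-to-end sketch of using this annotator in a Spark NLP pipeline. The model name is a placeholder and the pretrained(...) loader on the companion object is assumed rather than shown on this page; the standard DocumentAssembler supplies the DOCUMENT input.

```scala
import com.johnsnowlabs.nlp.DocumentAssembler
import com.johnsnowlabs.nlp.annotators.seq2seq.MedicalEncoderDecoder
import org.apache.spark.ml.Pipeline

// Assumes an active SparkSession named `spark`.
import spark.implicits._

// Sketch only: the model name is a placeholder, and the pretrained(...)
// loader on the companion object is assumed, not documented on this page.
val documentAssembler = new DocumentAssembler()
  .setInputCol("text")
  .setOutputCol("document")

val encoderDecoder = MedicalEncoderDecoder
  .pretrained("placeholder_seq2seq_model", "en", "clinical/models")
  .setInputCols("document")
  .setOutputCol("generated")
  .setMaxNewTokens(100)

val pipeline = new Pipeline().setStages(Array(documentAssembler, encoderDecoder))

val data = Seq("Patient presents with chest pain and shortness of breath.").toDF("text")
val result = pipeline.fit(data).transform(data)

result.selectExpr("generated.result").show(truncate = false)
```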
- By Inheritance
- MedicalEncoderDecoder
- CheckLicense
- WriteSentencePieceModel
- HasEngine
- WriteOnnxModel
- WriteTensorflowModel
- HasBatchedAnnotate
- HasCaseSensitiveProperties
- AnnotatorModel
- CanBeLazy
- RawAnnotator
- HasOutputAnnotationCol
- HasInputAnnotationCols
- HasOutputAnnotatorType
- ParamsAndFeaturesWritable
- HasFeatures
- DefaultParamsWritable
- MLWritable
- Model
- Transformer
- PipelineStage
- Logging
- Params
- Serializable
- Serializable
- Identifiable
- AnyRef
- Any
Type Members
Value Members
-
final
def
!=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
##(): Int
- Definition Classes
- AnyRef → Any
-
final
def
$[T](param: Param[T]): T
- Attributes
- protected
- Definition Classes
- Params
-
def
$$[T](feature: StructFeature[T]): T
- Attributes
- protected
- Definition Classes
- HasFeatures
-
def
$$[K, V](feature: MapFeature[K, V]): Map[K, V]
- Attributes
- protected
- Definition Classes
- HasFeatures
-
def
$$[T](feature: SetFeature[T]): Set[T]
- Attributes
- protected
- Definition Classes
- HasFeatures
-
def
$$[T](feature: ArrayFeature[T]): Array[T]
- Attributes
- protected
- Definition Classes
- HasFeatures
-
final
def
==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
def
_transform(dataset: Dataset[_], recursivePipeline: Option[PipelineModel]): DataFrame
- Attributes
- protected
- Definition Classes
- AnnotatorModel
-
def
afterAnnotate(dataset: DataFrame): DataFrame
- Attributes
- protected
- Definition Classes
- AnnotatorModel
-
final
def
asInstanceOf[T0]: T0
- Definition Classes
- Any
-
def
batchAnnotate(batchedAnnotations: Seq[Array[Annotation]]): Seq[Seq[Annotation]]
- Definition Classes
- MedicalEncoderDecoder → HasBatchedAnnotate
-
def
batchProcess(rows: Iterator[_]): Iterator[Row]
- Definition Classes
- HasBatchedAnnotate
-
val
batchSize: IntParam
- Definition Classes
- HasBatchedAnnotate
-
def
beforeAnnotate(dataset: Dataset[_]): Dataset[_]
- Attributes
- protected
- Definition Classes
- AnnotatorModel
-
val
caseSensitive: BooleanParam
- Definition Classes
- HasCaseSensitiveProperties
-
final
def
checkSchema(schema: StructType, inputAnnotatorType: String): Boolean
- Attributes
- protected
- Definition Classes
- HasInputAnnotationCols
-
def
checkValidEnvironment(spark: Option[SparkSession], scopes: Seq[String]): Unit
- Definition Classes
- CheckLicense
-
def
checkValidScope(scope: String): Unit
- Definition Classes
- CheckLicense
-
def
checkValidScopeAndEnvironment(scope: String, spark: Option[SparkSession], checkLp: Boolean): Unit
- Definition Classes
- CheckLicense
-
def
checkValidScopesAndEnvironment(scopes: Seq[String], spark: Option[SparkSession], checkLp: Boolean): Unit
- Definition Classes
- CheckLicense
-
final
def
clear(param: Param[_]): MedicalEncoderDecoder.this.type
- Definition Classes
- Params
-
def
clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
-
val
configProtoBytes: IntArrayParam
ConfigProto from TensorFlow, serialized into a byte array. Get with config_proto.SerializeToString().
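A hedged sketch of wiring such bytes in through setConfigProtoBytes (listed further down), assuming the serialized proto was produced elsewhere and an already-loaded encoderDecoder instance:

```scala
import java.nio.file.{Files, Paths}

// Assumption: config.pb holds a ConfigProto serialized elsewhere,
// e.g. in Python via tf.compat.v1.ConfigProto(...).SerializeToString().
val protoBytes: Array[Byte] = Files.readAllBytes(Paths.get("config.pb"))

// setConfigProtoBytes expects Array[Int], so widen each byte.
encoderDecoder.setConfigProtoBytes(protoBytes.map(_.toInt))
```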
-
def
copy(extra: ParamMap): MedicalEncoderDecoder
- Definition Classes
- RawAnnotator → Model → Transformer → PipelineStage → Params
-
def
copyValues[T <: Params](to: T, extra: ParamMap): T
- Attributes
- protected
- Definition Classes
- Params
-
final
def
defaultCopy[T <: Params](extra: ParamMap): T
- Attributes
- protected
- Definition Classes
- Params
-
val
doSample: BooleanParam
Whether or not to use sampling; uses greedy decoding otherwise (Default: false)
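A sketch of switching to sampling-based decoding, combining this flag with the topK, noRepeatNgramSize and randomSeed parameters declared elsewhere on this page; the instance and values are illustrative:

```scala
// Enable top-k sampling instead of greedy decoding (illustrative values).
encoderDecoder
  .setDoSample(true)        // sample from the distribution rather than taking the argmax
  .setTopK(50)              // keep only the 50 highest-probability tokens at each step
  .setNoRepeatNgramSize(3)  // forbid any 3-gram from repeating in the output
  .setRandomSeed(42)        // fix the seed so sampling is reproducible
```
-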
val
engine: Param[String]
- Definition Classes
- HasEngine
-
final
def
eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
def
equals(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
def
explainParam(param: Param[_]): String
- Definition Classes
- Params
-
def
explainParams(): String
- Definition Classes
- Params
-
def
extraValidate(structType: StructType): Boolean
- Attributes
- protected
- Definition Classes
- RawAnnotator
-
def
extraValidateMsg: String
- Attributes
- protected
- Definition Classes
- RawAnnotator
-
final
def
extractParamMap(): ParamMap
- Definition Classes
- Params
-
final
def
extractParamMap(extra: ParamMap): ParamMap
- Definition Classes
- Params
-
val
features: ArrayBuffer[Feature[_, _, _]]
- Definition Classes
- HasFeatures
-
def
finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
-
def
get[T](feature: StructFeature[T]): Option[T]
- Attributes
- protected
- Definition Classes
- HasFeatures
-
def
get[K, V](feature: MapFeature[K, V]): Option[Map[K, V]]
- Attributes
- protected
- Definition Classes
- HasFeatures
-
def
get[T](feature: SetFeature[T]): Option[Set[T]]
- Attributes
- protected
- Definition Classes
- HasFeatures
-
def
get[T](feature: ArrayFeature[T]): Option[Array[T]]
- Attributes
- protected
- Definition Classes
- HasFeatures
-
final
def
get[T](param: Param[T]): Option[T]
- Definition Classes
- Params
-
def
getBatchSize: Int
- Definition Classes
- HasBatchedAnnotate
-
def
getCaseSensitive: Boolean
- Definition Classes
- HasCaseSensitiveProperties
-
final
def
getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
- def getConfigProtoBytes: Option[Array[Byte]]
-
final
def
getDefault[T](param: Param[T]): Option[T]
- Definition Classes
- Params
- def getDoSample: Boolean
-
def
getEngine: String
- Definition Classes
- HasEngine
- def getIgnoreTokenIds: Array[Int]
-
def
getInputCols: Array[String]
- Definition Classes
- HasInputAnnotationCols
-
def
getLazyAnnotator: Boolean
- Definition Classes
- CanBeLazy
- def getMaxNewTokens: Int
- def getMaxTextLength: Int
-
def
getMlFrameworkType: String
Get ML framework type
- def getModelIfNotSet: MedicalEncoderDecoderModel
- def getNoRepeatNgramSize: Int
-
final
def
getOrDefault[T](param: Param[T]): T
- Definition Classes
- Params
-
final
def
getOutputCol: String
- Definition Classes
- HasOutputAnnotationCol
-
def
getParam(paramName: String): Param[Any]
- Definition Classes
- Params
- def getRandomSeed: Option[Int]
- def getRefineChunkSize: Int
- def getRefineMaxAttempts: Int
- def getRefineSummary: Boolean
- def getRefineSummaryTargetLength: Int
- def getSignatures: Option[Map[String, String]]
-
def
getStopAtEos: Boolean
Checks whether text generation stops when the end-of-sentence token is encountered.
-
def
getTask: String
- Attributes
- protected
-
def
getTopK: Int
- Attributes
- protected
- def getUseCache: Boolean
-
final
def
hasDefault[T](param: Param[T]): Boolean
- Definition Classes
- Params
-
def
hasParam(paramName: String): Boolean
- Definition Classes
- Params
-
def
hasParent: Boolean
- Definition Classes
- Model
-
def
hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
var
ignoreTokenIds: IntArrayParam
A list of token ids which are ignored in the decoder's output (Default: Array())
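A sketch combining this list with setStopAtEos (documented below); the instance and token ids are illustrative:

```scala
// Drop specific token ids from the generated output and stop generation
// at the end-of-sentence token (ids here are purely illustrative).
encoderDecoder
  .setIgnoreTokenIds(Array(0, 1))
  .setStopAtEos(true)
```
-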
def
initializeLogIfNecessary(isInterpreter: Boolean, silent: Boolean): Boolean
- Attributes
- protected
- Definition Classes
- Logging
-
def
initializeLogIfNecessary(isInterpreter: Boolean): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
val
inputAnnotatorTypes: Array[AnnotatorType]
Input annotator type: DOCUMENT
- Definition Classes
- MedicalEncoderDecoder → HasInputAnnotationCols
-
final
val
inputCols: StringArrayParam
- Attributes
- protected
- Definition Classes
- HasInputAnnotationCols
-
final
def
isDefined(param: Param[_]): Boolean
- Definition Classes
- Params
-
final
def
isInstanceOf[T0]: Boolean
- Definition Classes
- Any
-
final
def
isSet(param: Param[_]): Boolean
- Definition Classes
- Params
-
def
isTraceEnabled(): Boolean
- Attributes
- protected
- Definition Classes
- Logging
-
val
lazyAnnotator: BooleanParam
- Definition Classes
- CanBeLazy
-
def
log: Logger
- Attributes
- protected
- Definition Classes
- Logging
-
def
logDebug(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logDebug(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logError(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logError(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logInfo(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logInfo(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logName: String
- Attributes
- protected
- Definition Classes
- Logging
-
def
logTrace(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logTrace(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logWarning(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logWarning(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
val
maxNewTokens: IntParam
Maximum number of new tokens to be generated (Default: 30)
-
val
maxTextLength: IntParam
Maximum length of context text (Default: 1024)
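A sketch of bounding input and output lengths together with maxNewTokens above; the instance and values are illustrative:

```scala
// Truncate the input context to 512 tokens and cap generation at 64 new tokens.
encoderDecoder
  .setMaxTextLength(512)
  .setMaxNewTokens(64)
```
-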
val
mlFrameworkType: Param[String]
ML framework type
-
def
msgHelper(schema: StructType): String
- Attributes
- protected
- Definition Classes
- HasInputAnnotationCols
-
final
def
ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
val
noRepeatNgramSize: IntParam
If set to an int > 0, all n-grams of that size can only occur once (Default: 0)
-
final
def
notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
final
def
notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
def
onWrite(path: String, spark: SparkSession): Unit
- Definition Classes
- MedicalEncoderDecoder → ParamsAndFeaturesWritable
-
def
onWriteSkip(path: String, spark: SparkSession): Unit
- Attributes
- protected
-
val
optionalInputAnnotatorTypes: Array[String]
- Definition Classes
- HasInputAnnotationCols
-
val
outputAnnotatorType: String
Output annotator type: DOCUMENT
- Definition Classes
- MedicalEncoderDecoder → HasOutputAnnotatorType
-
final
val
outputCol: Param[String]
- Attributes
- protected
- Definition Classes
- HasOutputAnnotationCol
-
lazy val
params: Array[Param[_]]
- Definition Classes
- Params
-
var
parent: Estimator[MedicalEncoderDecoder]
- Definition Classes
- Model
-
var
randomSeed: Option[Int]
Optional random seed for the model. Needs to be of type Int.
-
val
refineChunkSize: IntParam
How large refined chunks should be. Should be equal to the LLM context window size in tokens. Only takes effect when refineSummary is set to true.
-
val
refineMaxAttempts: IntParam
How many times chunks should be re-summarized while they are above the summary target length before stopping. Only takes effect when refineSummary is set to true.
-
val
refineSummary: BooleanParam
Set to true to perform refined summarization, at increased computation cost.
-
val
refineSummaryTargetLength: IntParam
Target length for the refined summary. Text will be chunked and re-summarized until it is below this length or the maximum number of attempts is used up.
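The refine parameters act together; a sketch with illustrative values, assuming an already-loaded encoderDecoder instance:

```scala
// Refined summarization: chunk, summarize, and re-summarize until the
// result is below the target length or the attempt budget is spent.
encoderDecoder
  .setRefineSummary(true)             // enable the refine loop
  .setRefineChunkSize(512)            // chunk size; ideally the model's context window in tokens
  .setRefineSummaryTargetLength(200)  // target summary length (illustrative)
  .setRefineMaxAttempts(3)            // stop after at most 3 re-summarization passes
```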
-
def
save(path: String): Unit
- Definition Classes
- MLWritable
- Annotations
- @Since( "1.6.0" ) @throws( ... )
-
def
set[T](feature: StructFeature[T], value: T): MedicalEncoderDecoder.this.type
- Attributes
- protected
- Definition Classes
- HasFeatures
-
def
set[K, V](feature: MapFeature[K, V], value: Map[K, V]): MedicalEncoderDecoder.this.type
- Attributes
- protected
- Definition Classes
- HasFeatures
-
def
set[T](feature: SetFeature[T], value: Set[T]): MedicalEncoderDecoder.this.type
- Attributes
- protected
- Definition Classes
- HasFeatures
-
def
set[T](feature: ArrayFeature[T], value: Array[T]): MedicalEncoderDecoder.this.type
- Attributes
- protected
- Definition Classes
- HasFeatures
-
final
def
set(paramPair: ParamPair[_]): MedicalEncoderDecoder.this.type
- Attributes
- protected
- Definition Classes
- Params
-
final
def
set(param: String, value: Any): MedicalEncoderDecoder.this.type
- Attributes
- protected
- Definition Classes
- Params
-
final
def
set[T](param: Param[T], value: T): MedicalEncoderDecoder.this.type
- Definition Classes
- Params
-
def
setBatchSize(size: Int): MedicalEncoderDecoder.this.type
- Definition Classes
- HasBatchedAnnotate
-
def
setCaseSensitive(value: Boolean): MedicalEncoderDecoder.this.type
- Definition Classes
- HasCaseSensitiveProperties
- def setConfigProtoBytes(bytes: Array[Int]): MedicalEncoderDecoder.this.type
-
def
setDefault[T](feature: StructFeature[T], value: () ⇒ T): MedicalEncoderDecoder.this.type
- Attributes
- protected
- Definition Classes
- HasFeatures
-
def
setDefault[K, V](feature: MapFeature[K, V], value: () ⇒ Map[K, V]): MedicalEncoderDecoder.this.type
- Attributes
- protected
- Definition Classes
- HasFeatures
-
def
setDefault[T](feature: SetFeature[T], value: () ⇒ Set[T]): MedicalEncoderDecoder.this.type
- Attributes
- protected
- Definition Classes
- HasFeatures
-
def
setDefault[T](feature: ArrayFeature[T], value: () ⇒ Array[T]): MedicalEncoderDecoder.this.type
- Attributes
- protected
- Definition Classes
- HasFeatures
-
final
def
setDefault(paramPairs: ParamPair[_]*): MedicalEncoderDecoder.this.type
- Attributes
- protected
- Definition Classes
- Params
-
final
def
setDefault[T](param: Param[T], value: T): MedicalEncoderDecoder.this.type
- Attributes
- protected[org.apache.spark.ml]
- Definition Classes
- Params
- def setDoSample(value: Boolean): MedicalEncoderDecoder.this.type
- def setIgnoreTokenIds(tokenIds: Array[Int]): MedicalEncoderDecoder.this.type
-
final
def
setInputCols(value: String*): MedicalEncoderDecoder.this.type
- Definition Classes
- HasInputAnnotationCols
-
def
setInputCols(value: Array[String]): MedicalEncoderDecoder.this.type
- Definition Classes
- HasInputAnnotationCols
-
def
setLazyAnnotator(value: Boolean): MedicalEncoderDecoder.this.type
- Definition Classes
- CanBeLazy
- def setMaxNewTokens(value: Int): MedicalEncoderDecoder.this.type
- def setMaxTextLength(value: Int): MedicalEncoderDecoder.this.type
-
def
setMlFrameworkType(value: String): MedicalEncoderDecoder.this.type
Set ML framework type
- def setModelIfNotSet(spark: SparkSession, encoder: OnnxWrapper, decoder: OnnxWrapper, spp: SentencePieceWrapper): MedicalEncoderDecoder.this.type
- def setModelIfNotSet(spark: SparkSession, tfWrapper: TensorflowWrapper, spp: SentencePieceWrapper, useCache: Boolean): MedicalEncoderDecoder.this.type
- def setNoRepeatNgramSize(value: Int): MedicalEncoderDecoder.this.type
-
final
def
setOutputCol(value: String): MedicalEncoderDecoder.this.type
- Definition Classes
- HasOutputAnnotationCol
-
def
setParent(parent: Estimator[MedicalEncoderDecoder]): MedicalEncoderDecoder
- Definition Classes
- Model
- def setRandomSeed(value: Int): MedicalEncoderDecoder.this.type
- def setRefineChunkSize(value: Int): MedicalEncoderDecoder.this.type
- def setRefineMaxAttempts(value: Int): MedicalEncoderDecoder.this.type
- def setRefineSummary(value: Boolean): MedicalEncoderDecoder.this.type
- def setRefineSummaryTargetLength(value: Int): MedicalEncoderDecoder.this.type
- def setSignatures(value: Map[String, String]): MedicalEncoderDecoder.this.type
-
def
setStopAtEos(value: Boolean): MedicalEncoderDecoder.this.type
Determines whether text generation stops when the end-of-sentence token is encountered.
-
def
setTask(value: String): MedicalEncoderDecoder.this.type
- Attributes
- protected
- def setTopK(value: Int): MedicalEncoderDecoder.this.type
-
def
setUseCache(value: Boolean): MedicalEncoderDecoder.this.type
- Attributes
- protected
- val signatures: MapFeature[String, String]
- val stopAtEos: BooleanParam
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
-
val
task: Param[String]
Set the transformer task, e.g. "summarize:" (Default: ""). The T5 task needs to be in the format "task:".
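Since setTask is protected, a minimal sketch sets the task param through the public Params.set, assuming an already-loaded encoderDecoder instance:

```scala
// Prepend a T5-style task prefix to every input before generation.
// setTask is protected, so the public Params.set is used with the task param.
encoderDecoder.set(encoderDecoder.task, "summarize:")
```
-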
def
toString(): String
- Definition Classes
- Identifiable → AnyRef → Any
-
val
topK: IntParam
The number of highest-probability vocabulary tokens to keep for top-k filtering (Default: 50)
-
final
def
transform(dataset: Dataset[_]): DataFrame
- Definition Classes
- AnnotatorModel → Transformer
-
def
transform(dataset: Dataset[_], paramMap: ParamMap): DataFrame
- Definition Classes
- Transformer
- Annotations
- @Since( "2.0.0" )
-
def
transform(dataset: Dataset[_], firstParamPair: ParamPair[_], otherParamPairs: ParamPair[_]*): DataFrame
- Definition Classes
- Transformer
- Annotations
- @Since( "2.0.0" ) @varargs()
-
final
def
transformSchema(schema: StructType): StructType
- Definition Classes
- RawAnnotator → PipelineStage
-
def
transformSchema(schema: StructType, logging: Boolean): StructType
- Attributes
- protected
- Definition Classes
- PipelineStage
- Annotations
- @DeveloperApi()
-
val
uid: String
- Definition Classes
- MedicalEncoderDecoder → Identifiable
-
val
useCache: BooleanParam
Cache internal state of the model to improve performance
-
def
validate(schema: StructType): Boolean
- Attributes
- protected
- Definition Classes
- RawAnnotator
-
final
def
wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
-
def
wrapColumnMetadata(col: Column): Column
- Attributes
- protected
- Definition Classes
- RawAnnotator
-
def
write: MLWriter
- Definition Classes
- ParamsAndFeaturesWritable → DefaultParamsWritable → MLWritable
-
def
writeOnnxModel(path: String, spark: SparkSession, onnxWrapper: OnnxWrapper, suffix: String, fileName: String): Unit
- Definition Classes
- WriteOnnxModel
-
def
writeOnnxModels(path: String, spark: SparkSession, onnxWrappersWithNames: Seq[(OnnxWrapper, String)], suffix: String): Unit
- Definition Classes
- WriteOnnxModel
-
def
writeSentencePieceModel(path: String, spark: SparkSession, spp: SentencePieceWrapper, suffix: String, filename: String): Unit
- Definition Classes
- WriteSentencePieceModel
-
def
writeTensorflowHub(path: String, tfPath: String, spark: SparkSession, suffix: String): Unit
- Definition Classes
- WriteTensorflowModel
-
def
writeTensorflowModel(path: String, spark: SparkSession, tensorflow: TensorflowWrapper, suffix: String, filename: String, configProtoBytes: Option[Array[Byte]]): Unit
- Definition Classes
- WriteTensorflowModel
-
def
writeTensorflowModelV2(path: String, spark: SparkSession, tensorflow: TensorflowWrapper, suffix: String, filename: String, configProtoBytes: Option[Array[Byte]], savedSignatures: Option[Map[String, String]]): Unit
- Definition Classes
- WriteTensorflowModel