Instance Method — swift-transformers 0.1.17 — Models
predictNextTokenScores(_:config:)
LanguageModel.swift:110
func predictNextTokenScores(_ tokens: InputTokens, config: GenerationConfig) -> any MLShapedArrayProtocol
func predictNextTokenScores(_ tokens: InputTokens, config: GenerationConfig) -> any MLShapedArrayProtocol
s6Models13LanguageModelC22predictNextTokenScores_6config6CoreML21MLShapedArrayProtocol_pSaySiG_10Generation0N6ConfigVtF
What are these?
import Models
class LanguageModel
typealias InputTokens = [Int]
struct GenerationConfig
Essentials taken from https://github.com/huggingface/transformers/blob/main/src/transformers/generation/configuration_utils.py
required init(model: MLModel)
static func loadCompiled(url: URL, computeUnits: MLComputeUnits = .cpuAndGPU) throws -> LanguageModel
var bosTokenId: Int? { get async throws }
var defaultDoSample: Bool { get async throws }
var defaultGenerationConfig: GenerationConfig { get }
var description: String { get }
var eosTokenId: Int? { get async throws }
var inputIdsDescription: MLFeatureDescription { get }
var inputIdsName: String { get }
var inputIdsShape: [Int] { get }
The expected shape of the model's latent sample input
let maxContextLength: Int
let minContextLength: Int
let model: MLModel
var modelConfig: Config { get async throws }
var modelName: String { get }
var modelType: String? { get async throws }
var requiresAttention: Bool { get }
var textGenerationParameters: Config? { get async throws }
var tokenizer: Tokenizer { get async throws }
var tokenizerConfig: Config? { get async throws }
var tokenizerData: Config { get async throws }