Dataset Viewer
Module | Name | MLX Python | MLX Swift | Notes | Transformers / Diffusers .py (TODO)
---|---|---|---|---|---
StableDiffusion
|
StableDiffusion
|
# Copyright © 2023-2024 Apple Inc.
import time
from typing import Optional, Tuple
import mlx.core as mx
from .model_io import (
_DEFAULT_MODEL,
load_autoencoder,
load_diffusion_config,
load_text_encoder,
load_tokenizer,
load_unet,
)
from .sampler import SimpleEulerAncestralSampler, SimpleEulerSampler
class StableDiffusion:
def __init__(self, model: str = _DEFAULT_MODEL, float16: bool = False):
self.dtype = mx.float16 if float16 else mx.float32
self.diffusion_config = load_diffusion_config(model)
self.unet = load_unet(model, float16)
self.text_encoder = load_text_encoder(model, float16)
self.autoencoder = load_autoencoder(model, False)
self.sampler = SimpleEulerSampler(self.diffusion_config)
self.tokenizer = load_tokenizer(model)
def ensure_models_are_loaded(self):
mx.eval(self.unet.parameters())
mx.eval(self.text_encoder.parameters())
mx.eval(self.autoencoder.parameters())
def _tokenize(self, tokenizer, text: str, negative_text: Optional[str] = None):
# Tokenize the text
tokens = [tokenizer.tokenize(text)]
if negative_text is not None:
tokens += [tokenizer.tokenize(negative_text)]
lengths = [len(t) for t in tokens]
N = max(lengths)
tokens = [t + [0] * (N - len(t)) for t in tokens]
tokens = mx.array(tokens)
return tokens
def _get_text_conditioning(
self,
text: str,
n_images: int = 1,
cfg_weight: float = 7.5,
negative_text: str = "",
):
# Tokenize the text
tokens = self._tokenize(
self.tokenizer, text, (negative_text if cfg_weight > 1 else None)
)
# Compute the features
conditioning = self.text_encoder(tokens).last_hidden_state
# Repeat the conditioning for each of the generated images
if n_images > 1:
conditioning = mx.repeat(conditioning, n_images, axis=0)
return conditioning
def _denoising_step(
self, x_t, t, t_prev, conditioning, cfg_weight: float = 7.5, text_time=None
):
x_t_unet = mx.concatenate([x_t] * 2, axis=0) if cfg_weight > 1 else x_t
t_unet = mx.broadcast_to(t, [len(x_t_unet)])
eps_pred = self.unet(
x_t_unet, t_unet, encoder_x=conditioning, text_time=text_time
)
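        # Classifier-free guidance: when cfg_weight > 1 the batch holds the
        # conditional and negative (unconditional) predictions, combined as
        # eps = eps_neg + cfg_weight * (eps_text - eps_neg).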
if cfg_weight > 1:
eps_text, eps_neg = eps_pred.split(2)
eps_pred = eps_neg + cfg_weight * (eps_text - eps_neg)
x_t_prev = self.sampler.step(eps_pred, x_t, t, t_prev)
return x_t_prev
def _denoising_loop(
self,
x_T,
T,
conditioning,
num_steps: int = 50,
cfg_weight: float = 7.5,
text_time=None,
):
x_t = x_T
for t, t_prev in self.sampler.timesteps(
num_steps, start_time=T, dtype=self.dtype
):
x_t = self._denoising_step(
x_t, t, t_prev, conditioning, cfg_weight, text_time
)
yield x_t
def generate_latents(
self,
text: str,
n_images: int = 1,
num_steps: int = 50,
cfg_weight: float = 7.5,
negative_text: str = "",
latent_size: Tuple[int] = (64, 64),
seed=None,
):
# Set the PRNG state
seed = int(time.time()) if seed is None else seed
mx.random.seed(seed)
# Get the text conditioning
conditioning = self._get_text_conditioning(
text, n_images, cfg_weight, negative_text
)
# Create the latent variables
x_T = self.sampler.sample_prior(
(n_images, *latent_size, self.autoencoder.latent_channels), dtype=self.dtype
)
# Perform the denoising loop
yield from self._denoising_loop(
x_T, self.sampler.max_time, conditioning, num_steps, cfg_weight
)
def generate_latents_from_image(
self,
image,
text: str,
n_images: int = 1,
strength: float = 0.8,
num_steps: int = 50,
cfg_weight: float = 7.5,
negative_text: str = "",
seed=None,
):
# Set the PRNG state
seed = int(time.time()) if seed is None else seed
mx.random.seed(seed)
# Define the num steps and start step
start_step = self.sampler.max_time * strength
num_steps = int(num_steps * strength)
# Get the text conditioning
conditioning = self._get_text_conditioning(
text, n_images, cfg_weight, negative_text
)
# Get the latents from the input image and add noise according to the
# start time.
x_0, _ = self.autoencoder.encode(image[None])
x_0 = mx.broadcast_to(x_0, (n_images,) + x_0.shape[1:])
x_T = self.sampler.add_noise(x_0, mx.array(start_step))
# Perform the denoising loop
yield from self._denoising_loop(
x_T, start_step, conditioning, num_steps, cfg_weight
)
def decode(self, x_t):
x = self.autoencoder.decode(x_t)
x = mx.clip(x / 2 + 0.5, 0, 1)
return x
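# Minimal usage sketch (assumes the default model weights can be loaded): iterate
# the latents, evaluating each one, then decode the final latent into an image
# with values in [0, 1] and layout (B, H, W, C).
#
#     sd = StableDiffusion(float16=True)
#     x_t = None
#     for x_t in sd.generate_latents("a photo of an astronaut riding a horse"):
#         mx.eval(x_t)
#     image = sd.decode(x_t)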
class StableDiffusionXL(StableDiffusion):
def __init__(self, model: str = _DEFAULT_MODEL, float16: bool = False):
super().__init__(model, float16)
self.sampler = SimpleEulerAncestralSampler(self.diffusion_config)
self.text_encoder_1 = self.text_encoder
self.tokenizer_1 = self.tokenizer
del self.tokenizer, self.text_encoder
self.text_encoder_2 = load_text_encoder(
model,
float16,
model_key="text_encoder_2",
)
self.tokenizer_2 = load_tokenizer(
model,
merges_key="tokenizer_2_merges",
vocab_key="tokenizer_2_vocab",
)
def ensure_models_are_loaded(self):
mx.eval(self.unet.parameters())
mx.eval(self.text_encoder_1.parameters())
mx.eval(self.text_encoder_2.parameters())
mx.eval(self.autoencoder.parameters())
def _get_text_conditioning(
self,
text: str,
n_images: int = 1,
cfg_weight: float = 7.5,
negative_text: str = "",
):
tokens_1 = self._tokenize(
self.tokenizer_1,
text,
(negative_text if cfg_weight > 1 else None),
)
tokens_2 = self._tokenize(
self.tokenizer_2,
text,
(negative_text if cfg_weight > 1 else None),
)
conditioning_1 = self.text_encoder_1(tokens_1)
conditioning_2 = self.text_encoder_2(tokens_2)
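        # SDXL conditions the UNet on the penultimate hidden state of both text
        # encoders, concatenated along the feature axis, plus the pooled output of
        # the second encoder.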
conditioning = mx.concatenate(
[conditioning_1.hidden_states[-2], conditioning_2.hidden_states[-2]],
axis=-1,
)
pooled_conditioning = conditioning_2.pooled_output
if n_images > 1:
conditioning = mx.repeat(conditioning, n_images, axis=0)
pooled_conditioning = mx.repeat(pooled_conditioning, n_images, axis=0)
return conditioning, pooled_conditioning
def generate_latents(
self,
text: str,
n_images: int = 1,
num_steps: int = 2,
cfg_weight: float = 0.0,
negative_text: str = "",
latent_size: Tuple[int] = (64, 64),
seed=None,
):
# Set the PRNG state
seed = int(time.time()) if seed is None else seed
mx.random.seed(seed)
# Get the text conditioning
conditioning, pooled_conditioning = self._get_text_conditioning(
text, n_images, cfg_weight, negative_text
)
text_time = (
pooled_conditioning,
mx.array([[512, 512, 0, 0, 512, 512.0]] * len(pooled_conditioning)),
)
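        # The time_ids encode SDXL micro-conditioning as
        # (original H, original W, crop top, crop left, target H, target W).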
# Create the latent variables
x_T = self.sampler.sample_prior(
(n_images, *latent_size, self.autoencoder.latent_channels), dtype=self.dtype
)
# Perform the denoising loop
yield from self._denoising_loop(
x_T,
self.sampler.max_time,
conditioning,
num_steps,
cfg_weight,
text_time=text_time,
)
def generate_latents_from_image(
self,
image,
text: str,
n_images: int = 1,
strength: float = 0.8,
num_steps: int = 2,
cfg_weight: float = 0.0,
negative_text: str = "",
seed=None,
):
# Set the PRNG state
        seed = int(time.time()) if seed is None else seed
mx.random.seed(seed)
# Define the num steps and start step
start_step = self.sampler.max_time * strength
num_steps = int(num_steps * strength)
# Get the text conditioning
conditioning, pooled_conditioning = self._get_text_conditioning(
text, n_images, cfg_weight, negative_text
)
text_time = (
pooled_conditioning,
mx.array([[512, 512, 0, 0, 512, 512.0]] * len(pooled_conditioning)),
)
# Get the latents from the input image and add noise according to the
# start time.
x_0, _ = self.autoencoder.encode(image[None])
x_0 = mx.broadcast_to(x_0, (n_images,) + x_0.shape[1:])
x_T = self.sampler.add_noise(x_0, mx.array(start_step))
# Perform the denoising loop
yield from self._denoising_loop(
x_T, start_step, conditioning, num_steps, cfg_weight, text_time=text_time
)
|
// Copyright © 2024 Apple Inc.
import Foundation
import Hub
import MLX
import MLXNN
// port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/__init__.py
/// Iterator that produces latent images.
///
/// Created by:
///
/// - ``TextToImageGenerator/generateLatents(parameters:)``
/// - ``ImageToImageGenerator/generateLatents(image:parameters:strength:)``
public struct DenoiseIterator: Sequence, IteratorProtocol {
let sd: StableDiffusion
var xt: MLXArray
let conditioning: MLXArray
let cfgWeight: Float
let textTime: (MLXArray, MLXArray)?
var i: Int
let steps: [(MLXArray, MLXArray)]
init(
sd: StableDiffusion, xt: MLXArray, t: Int, conditioning: MLXArray, steps: Int,
cfgWeight: Float, textTime: (MLXArray, MLXArray)? = nil
) {
self.sd = sd
self.steps = sd.sampler.timeSteps(steps: steps, start: t, dType: sd.dType)
self.i = 0
self.xt = xt
self.conditioning = conditioning
self.cfgWeight = cfgWeight
self.textTime = textTime
}
public var underestimatedCount: Int {
steps.count
}
mutating public func next() -> MLXArray? {
guard i < steps.count else {
return nil
}
let (t, tPrev) = steps[i]
i += 1
xt = sd.step(
xt: xt, t: t, tPrev: tPrev, conditioning: conditioning, cfgWeight: cfgWeight,
textTime: textTime)
return xt
}
}
/// Type for the _decoder_ step.
public typealias ImageDecoder = (MLXArray) -> MLXArray
public protocol ImageGenerator {
func ensureLoaded()
/// Return a detached decoder -- this is useful if trying to conserve memory.
///
/// The decoder can be used independently of the ImageGenerator to transform
/// latents into raster images.
func detachedDecoder() -> ImageDecoder
    /// The equivalent of ``detachedDecoder()`` but without the detaching.
func decode(xt: MLXArray) -> MLXArray
}
/// Public interface for transforming a text prompt into an image.
///
/// Steps:
///
/// - ``generateLatents(parameters:)``
/// - evaluate each of the latents from the iterator
/// - ``ImageGenerator/decode(xt:)`` or ``ImageGenerator/detachedDecoder()`` to convert the final latent into an image
/// - use ``Image`` to save the image
public protocol TextToImageGenerator: ImageGenerator {
func generateLatents(parameters: EvaluateParameters) -> DenoiseIterator
}
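// Hypothetical usage sketch (the `generator` and `parameters` names are assumptions,
// not part of this file): iterate the latents, evaluating each one, then decode the
// final latent into a raster image.
//
//     var latent: MLXArray! = nil
//     for xt in generator.generateLatents(parameters: parameters) {
//         eval(xt)
//         latent = xt
//     }
//     let raster = generator.decode(xt: latent)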
/// Public interface for transforming an input image and a text prompt into a new image.
///
/// Steps:
///
/// - ``generateLatents(image:parameters:strength:)``
/// - evaluate each of the latents from the iterator
/// - ``ImageGenerator/decode(xt:)`` or ``ImageGenerator/detachedDecoder()`` to convert the final latent into an image
/// - use ``Image`` to save the image
public protocol ImageToImageGenerator: ImageGenerator {
func generateLatents(image: MLXArray, parameters: EvaluateParameters, strength: Float)
-> DenoiseIterator
}
enum ModelContainerError: LocalizedError {
/// Unable to create the particular type of model, e.g. it doesn't support image to image
case unableToCreate(String, String)
/// When operating in conserveMemory mode, it tried to use a model that had been discarded
case modelDiscarded
var errorDescription: String? {
switch self {
case .unableToCreate(let modelId, let generatorType):
return String(
localized:
"Unable to create a \(generatorType) with model ID '\(modelId)'. The model may not support this operation type."
)
case .modelDiscarded:
return String(
localized:
"The model has been discarded to conserve memory and is no longer available. Please recreate the model container."
)
}
}
}
/// Container for models that guarantees single threaded access.
public actor ModelContainer<M> {
enum State {
case discarded
case loaded(M)
}
var state: State
/// if true this will discard the model in ``performTwoStage(first:second:)``
var conserveMemory = false
private init(model: M) {
self.state = .loaded(model)
}
/// create a ``ModelContainer`` that supports ``TextToImageGenerator``
static public func createTextToImageGenerator(
configuration: StableDiffusionConfiguration, loadConfiguration: LoadConfiguration = .init()
) throws -> ModelContainer<TextToImageGenerator> {
if let model = try configuration.textToImageGenerator(configuration: loadConfiguration) {
return .init(model: model)
} else {
throw ModelContainerError.unableToCreate(configuration.id, "TextToImageGenerator")
}
}
/// create a ``ModelContainer`` that supports ``ImageToImageGenerator``
static public func createImageToImageGenerator(
configuration: StableDiffusionConfiguration, loadConfiguration: LoadConfiguration = .init()
) throws -> ModelContainer<ImageToImageGenerator> {
if let model = try configuration.imageToImageGenerator(configuration: loadConfiguration) {
return .init(model: model)
} else {
throw ModelContainerError.unableToCreate(configuration.id, "ImageToImageGenerator")
}
}
public func setConserveMemory(_ conserveMemory: Bool) {
self.conserveMemory = conserveMemory
}
    /// Perform an action on the model. Callers _must_ eval any `MLXArray` before returning as
    /// `MLXArray` is not `Sendable`.
public func perform<R>(_ action: @Sendable (M) throws -> R) throws -> R {
switch state {
case .discarded:
throw ModelContainerError.modelDiscarded
case .loaded(let m):
try action(m)
}
}
/// Perform a two stage action where the first stage returns values passed to the second stage.
///
    /// If memory conservation is enabled via ``setConserveMemory(_:)`` this will discard the model in between
/// the `first` and `second` blocks. The container will have to be recreated if a caller
/// wants to use it again.
///
/// If `false` this will just run them in sequence and the container can be reused.
///
/// Callers _must_ eval any `MLXArray` before returning as `MLXArray` is not `Sendable`.
public func performTwoStage<R1, R2>(
first: @Sendable (M) throws -> R1, second: @Sendable (R1) throws -> R2
) throws -> R2 {
let r1 =
switch state {
case .discarded:
throw ModelContainerError.modelDiscarded
case .loaded(let m):
try first(m)
}
if conserveMemory {
self.state = .discarded
}
return try second(r1)
}
}
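// Hypothetical two-stage usage sketch (the `container` and `parameters` names are
// assumptions, not part of this file): stage one produces the final latent plus a
// detached decoder, stage two decodes it, so the main model can be discarded in
// between when conserveMemory is enabled.
//
//     let raster = try await container.performTwoStage { generator in
//         var latent: MLXArray! = nil
//         for xt in generator.generateLatents(parameters: parameters) {
//             eval(xt)
//             latent = xt
//         }
//         return (generator.detachedDecoder(), latent!)
//     } second: { decode, latent in
//         let image = decode(latent)
//         eval(image)
//         return image
//     }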
/// Base class for Stable Diffusion.
open class StableDiffusion {
let dType: DType
let diffusionConfiguration: DiffusionConfiguration
let unet: UNetModel
let textEncoder: CLIPTextModel
let autoencoder: Autoencoder
let sampler: SimpleEulerSampler
let tokenizer: CLIPTokenizer
internal init(
hub: HubApi, configuration: StableDiffusionConfiguration, dType: DType,
diffusionConfiguration: DiffusionConfiguration? = nil, unet: UNetModel? = nil,
textEncoder: CLIPTextModel? = nil, autoencoder: Autoencoder? = nil,
sampler: SimpleEulerSampler? = nil, tokenizer: CLIPTokenizer? = nil
) throws {
self.dType = dType
self.diffusionConfiguration =
try diffusionConfiguration
?? loadDiffusionConfiguration(hub: hub, configuration: configuration)
self.unet = try unet ?? loadUnet(hub: hub, configuration: configuration, dType: dType)
self.textEncoder =
try textEncoder ?? loadTextEncoder(hub: hub, configuration: configuration, dType: dType)
// note: autoencoder uses float32 weights
self.autoencoder =
try autoencoder
?? loadAutoEncoder(hub: hub, configuration: configuration, dType: .float32)
if let sampler {
self.sampler = sampler
} else {
self.sampler = SimpleEulerSampler(configuration: self.diffusionConfiguration)
}
self.tokenizer = try tokenizer ?? loadTokenizer(hub: hub, configuration: configuration)
}
open func ensureLoaded() {
eval(unet, textEncoder, autoencoder)
}
func tokenize(tokenizer: CLIPTokenizer, text: String, negativeText: String?) -> MLXArray {
var tokens = [tokenizer.tokenize(text: text)]
if let negativeText {
tokens.append(tokenizer.tokenize(text: negativeText))
}
let c = tokens.count
let max = tokens.map { $0.count }.max() ?? 0
let mlxTokens = MLXArray(
tokens
.map {
($0 + Array(repeating: 0, count: max - $0.count))
}
.flatMap { $0 }
)
.reshaped(c, max)
return mlxTokens
}
open func step(
xt: MLXArray, t: MLXArray, tPrev: MLXArray, conditioning: MLXArray, cfgWeight: Float,
textTime: (MLXArray, MLXArray)?
) -> MLXArray {
let xtUnet = cfgWeight > 1 ? concatenated([xt, xt], axis: 0) : xt
let tUnet = broadcast(t, to: [xtUnet.count])
var epsPred = unet(xtUnet, timestep: tUnet, encoderX: conditioning, textTime: textTime)
if cfgWeight > 1 {
let (epsText, epsNeg) = epsPred.split()
epsPred = epsNeg + cfgWeight * (epsText - epsNeg)
}
return sampler.step(epsPred: epsPred, xt: xt, t: t, tPrev: tPrev)
}
public func detachedDecoder() -> ImageDecoder {
let autoencoder = self.autoencoder
func decode(xt: MLXArray) -> MLXArray {
var x = autoencoder.decode(xt)
x = clip(x / 2 + 0.5, min: 0, max: 1)
return x
}
return decode(xt:)
}
public func decode(xt: MLXArray) -> MLXArray {
detachedDecoder()(xt)
}
}
/// Implementation of ``StableDiffusion`` for the `stabilityai/stable-diffusion-2-1-base` model.
open class StableDiffusionBase: StableDiffusion, TextToImageGenerator {
public init(hub: HubApi, configuration: StableDiffusionConfiguration, dType: DType) throws {
try super.init(hub: hub, configuration: configuration, dType: dType)
}
func conditionText(text: String, imageCount: Int, cfgWeight: Float, negativeText: String?)
-> MLXArray
{
// tokenize the text
let tokens = tokenize(
tokenizer: tokenizer, text: text, negativeText: cfgWeight > 1 ? negativeText : nil)
// compute the features
var conditioning = textEncoder(tokens).lastHiddenState
// repeat the conditioning for each of the generated images
if imageCount > 1 {
conditioning = repeated(conditioning, count: imageCount, axis: 0)
}
return conditioning
}
public func generateLatents(parameters: EvaluateParameters) -> DenoiseIterator {
MLXRandom.seed(parameters.seed)
let conditioning = conditionText(
text: parameters.prompt, imageCount: parameters.imageCount,
cfgWeight: parameters.cfgWeight, negativeText: parameters.negativePrompt)
let xt = sampler.samplePrior(
shape: [parameters.imageCount] + parameters.latentSize + [autoencoder.latentChannels],
dType: dType)
return DenoiseIterator(
sd: self, xt: xt, t: sampler.maxTime, conditioning: conditioning,
steps: parameters.steps, cfgWeight: parameters.cfgWeight)
}
}
/// Implementation of ``StableDiffusion`` for the `stabilityai/sdxl-turbo` model.
open class StableDiffusionXL: StableDiffusion, TextToImageGenerator, ImageToImageGenerator {
let textEncoder2: CLIPTextModel
let tokenizer2: CLIPTokenizer
public init(hub: HubApi, configuration: StableDiffusionConfiguration, dType: DType) throws {
let diffusionConfiguration = try loadConfiguration(
hub: hub, configuration: configuration, key: .diffusionConfig,
type: DiffusionConfiguration.self)
let sampler = SimpleEulerAncestralSampler(configuration: diffusionConfiguration)
self.textEncoder2 = try loadTextEncoder(
hub: hub, configuration: configuration, configKey: .textEncoderConfig2,
weightsKey: .textEncoderWeights2, dType: dType)
self.tokenizer2 = try loadTokenizer(
hub: hub, configuration: configuration, vocabulary: .tokenizerVocabulary2,
merges: .tokenizerMerges2)
try super.init(
hub: hub, configuration: configuration, dType: dType,
diffusionConfiguration: diffusionConfiguration, sampler: sampler)
}
open override func ensureLoaded() {
super.ensureLoaded()
eval(textEncoder2)
}
func conditionText(text: String, imageCount: Int, cfgWeight: Float, negativeText: String?) -> (
MLXArray, MLXArray
) {
let tokens1 = tokenize(
tokenizer: tokenizer, text: text, negativeText: cfgWeight > 1 ? negativeText : nil)
let tokens2 = tokenize(
tokenizer: tokenizer2, text: text, negativeText: cfgWeight > 1 ? negativeText : nil)
let conditioning1 = textEncoder(tokens1)
let conditioning2 = textEncoder2(tokens2)
var conditioning = concatenated(
[
conditioning1.hiddenStates.dropLast().last!,
conditioning2.hiddenStates.dropLast().last!,
],
axis: -1)
        var pooledConditioning = conditioning2.pooledOutput
        if imageCount > 1 {
            conditioning = repeated(conditioning, count: imageCount, axis: 0)
            pooledConditioning = repeated(pooledConditioning, count: imageCount, axis: 0)
        }
        return (conditioning, pooledConditioning)
}
public func generateLatents(parameters: EvaluateParameters) -> DenoiseIterator {
MLXRandom.seed(parameters.seed)
let (conditioning, pooledConditioning) = conditionText(
text: parameters.prompt, imageCount: parameters.imageCount,
cfgWeight: parameters.cfgWeight, negativeText: parameters.negativePrompt)
let textTime = (
pooledConditioning,
repeated(
MLXArray(converting: [512.0, 512, 0, 0, 512, 512]).reshaped(1, -1),
count: pooledConditioning.count, axis: 0)
)
let xt = sampler.samplePrior(
shape: [parameters.imageCount] + parameters.latentSize + [autoencoder.latentChannels],
dType: dType)
return DenoiseIterator(
sd: self, xt: xt, t: sampler.maxTime, conditioning: conditioning,
steps: parameters.steps, cfgWeight: parameters.cfgWeight, textTime: textTime)
}
public func generateLatents(image: MLXArray, parameters: EvaluateParameters, strength: Float)
-> DenoiseIterator
{
MLXRandom.seed(parameters.seed)
// Define the num steps and start step
let startStep = Float(sampler.maxTime) * strength
let numSteps = Int(Float(parameters.steps) * strength)
let (conditioning, pooledConditioning) = conditionText(
text: parameters.prompt, imageCount: parameters.imageCount,
cfgWeight: parameters.cfgWeight, negativeText: parameters.negativePrompt)
let textTime = (
pooledConditioning,
repeated(
MLXArray(converting: [512.0, 512, 0, 0, 512, 512]).reshaped(1, -1),
count: pooledConditioning.count, axis: 0)
)
// Get the latents from the input image and add noise according to the
// start time.
var (x0, _) = autoencoder.encode(image[.newAxis])
x0 = broadcast(x0, to: [parameters.imageCount] + x0.shape.dropFirst())
let xt = sampler.addNoise(x: x0, t: MLXArray(startStep))
        return DenoiseIterator(
            sd: self, xt: xt, t: Int(startStep), conditioning: conditioning, steps: numSteps,
            cfgWeight: parameters.cfgWeight, textTime: textTime)
}
}
|
StableDiffusion
|
Tokenizer
|
# Copyright © 2023 Apple Inc.
import regex
class Tokenizer:
"""A simple port of CLIPTokenizer from https://github.com/huggingface/transformers/ ."""
def __init__(self, bpe_ranks, vocab):
self.bpe_ranks = bpe_ranks
self.vocab = vocab
self.pat = regex.compile(
r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
regex.IGNORECASE,
)
self._cache = {self.bos: self.bos, self.eos: self.eos}
@property
def bos(self):
return "<|startoftext|>"
@property
def bos_token(self):
return self.vocab[self.bos]
@property
def eos(self):
return "<|endoftext|>"
@property
def eos_token(self):
return self.vocab[self.eos]
def bpe(self, text):
if text in self._cache:
return self._cache[text]
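        # CLIP BPE marks the end of each word with "</w>" so that word-final merges
        # stay distinct from word-internal ones.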
unigrams = list(text[:-1]) + [text[-1] + "</w>"]
unique_bigrams = set(zip(unigrams, unigrams[1:]))
if not unique_bigrams:
return unigrams
# In every iteration try to merge the two most likely bigrams. If none
# was merged we are done.
#
# Ported from https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/tokenization_clip.py
while unique_bigrams:
bigram = min(
unique_bigrams, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))
)
if bigram not in self.bpe_ranks:
break
new_unigrams = []
skip = False
for a, b in zip(unigrams, unigrams[1:]):
if skip:
skip = False
continue
if (a, b) == bigram:
new_unigrams.append(a + b)
skip = True
else:
new_unigrams.append(a)
if not skip:
new_unigrams.append(b)
unigrams = new_unigrams
unique_bigrams = set(zip(unigrams, unigrams[1:]))
self._cache[text] = unigrams
return unigrams
def tokenize(self, text, prepend_bos=True, append_eos=True):
if isinstance(text, list):
return [self.tokenize(t, prepend_bos, append_eos) for t in text]
# Lower case cleanup and split according to self.pat. Hugging Face does
# a much more thorough job here but this should suffice for 95% of
# cases.
clean_text = regex.sub(r"\s+", " ", text.lower())
tokens = regex.findall(self.pat, clean_text)
# Split the tokens according to the byte-pair merge file
bpe_tokens = [ti for t in tokens for ti in self.bpe(t)]
# Map to token ids and return
tokens = [self.vocab[t] for t in bpe_tokens]
if prepend_bos:
tokens = [self.bos_token] + tokens
if append_eos:
tokens.append(self.eos_token)
return tokens
|
// Copyright © 2024 Apple Inc.
import Foundation
struct Bigram: Hashable {
let a: String
let b: String
init(_ s: String) {
let pieces = s.split(separator: " ")
precondition(pieces.count == 2, "BPEPair expected two pieces for '\(s)'")
self.a = String(pieces[0])
self.b = String(pieces[1])
}
init(_ a: String, _ b: String) {
self.a = a
self.b = b
}
init(_ v: (String, String)) {
self.a = v.0
self.b = v.1
}
}
/// A CLIP tokenizer.
///
/// Ported from:
///
/// - https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/tokenizer.py
/// - https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/tokenization_clip.py
///
/// Ideally this would be a tokenizer from `swift-transformers` but this is too special purpose to be representable in
/// what exists there (at time of writing).
class CLIPTokenizer {
let pattern =
#/<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+/#
let bpeRanks: [Bigram: Int]
let vocabulary: [String: Int]
let bos = "<|startoftext|>"
let eos = "<|endoftext|>"
let bosToken: Int
let eosToken: Int
var cache = [String: [String]]()
init(merges: [String], vocabulary: [String: Int]) {
self.bpeRanks = Dictionary(
uniqueKeysWithValues:
merges
.map { Bigram($0) }
.enumerated()
.map { ($0.element, $0.offset) })
self.vocabulary = vocabulary
self.cache[bos] = [bos]
self.cache[eos] = [eos]
self.bosToken = vocabulary[bos]!
self.eosToken = vocabulary[eos]!
}
func bpe(text: String) -> [String] {
if let result = cache[text] {
return result
}
precondition(!text.isEmpty)
var unigrams = text.dropLast().map { String($0) } + ["\(text.last!)</w>"]
var uniqueBigrams = Set(zip(unigrams, unigrams.dropFirst()).map { Bigram($0) })
// In every iteration try to merge the two most likely bigrams. If none
// was merged we are done
while !uniqueBigrams.isEmpty {
let (bigram, _) =
uniqueBigrams
.map { ($0, bpeRanks[$0] ?? Int.max) }
.min { $0.1 < $1.1 }!
if bpeRanks[bigram] == nil {
break
}
var newUnigrams = [String]()
var skip = false
for (a, b) in zip(unigrams, unigrams.dropFirst()) {
if skip {
skip = false
continue
}
if Bigram(a, b) == bigram {
newUnigrams.append(a + b)
skip = true
} else {
newUnigrams.append(a)
}
}
if !skip, let last = unigrams.last {
newUnigrams.append(last)
}
unigrams = newUnigrams
uniqueBigrams = Set(zip(unigrams, unigrams.dropFirst()).map { Bigram($0) })
}
cache[text] = unigrams
return unigrams
}
public func tokenize(text: String) -> [Int32] {
// Lower case cleanup and split according to self.pat. Hugging Face does
// a much more thorough job here but this should suffice for 95% of
// cases.
let clean = text.lowercased().replacing(#/\s+/#, with: " ")
let tokens = clean.matches(of: pattern).map { $0.description }
// Split the tokens according to the byte-pair merge file
let bpeTokens = tokens.flatMap { bpe(text: String($0)) }
// Map to token ids and return
let result = [bosToken] + bpeTokens.compactMap { vocabulary[$0] } + [eosToken]
return result.map { Int32($0) }
}
}
|
StableDiffusion
|
Unet
|
# Copyright © 2023 Apple Inc.
import math
from typing import Optional
import mlx.core as mx
import mlx.nn as nn
from .config import UNetConfig
def upsample_nearest(x, scale: int = 2):
B, H, W, C = x.shape
x = mx.broadcast_to(x[:, :, None, :, None, :], (B, H, scale, W, scale, C))
x = x.reshape(B, H * scale, W * scale, C)
return x
class TimestepEmbedding(nn.Module):
def __init__(self, in_channels: int, time_embed_dim: int):
super().__init__()
self.linear_1 = nn.Linear(in_channels, time_embed_dim)
self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim)
def __call__(self, x):
x = self.linear_1(x)
x = nn.silu(x)
x = self.linear_2(x)
return x
class TransformerBlock(nn.Module):
def __init__(
self,
model_dims: int,
num_heads: int,
hidden_dims: Optional[int] = None,
memory_dims: Optional[int] = None,
):
super().__init__()
self.norm1 = nn.LayerNorm(model_dims)
self.attn1 = nn.MultiHeadAttention(model_dims, num_heads)
self.attn1.out_proj.bias = mx.zeros(model_dims)
memory_dims = memory_dims or model_dims
self.norm2 = nn.LayerNorm(model_dims)
self.attn2 = nn.MultiHeadAttention(
model_dims, num_heads, key_input_dims=memory_dims
)
self.attn2.out_proj.bias = mx.zeros(model_dims)
hidden_dims = hidden_dims or 4 * model_dims
self.norm3 = nn.LayerNorm(model_dims)
self.linear1 = nn.Linear(model_dims, hidden_dims)
self.linear2 = nn.Linear(model_dims, hidden_dims)
self.linear3 = nn.Linear(hidden_dims, model_dims)
def __call__(self, x, memory, attn_mask, memory_mask):
# Self attention
y = self.norm1(x)
y = self.attn1(y, y, y, attn_mask)
x = x + y
# Cross attention
y = self.norm2(x)
y = self.attn2(y, memory, memory, memory_mask)
x = x + y
# FFN
y = self.norm3(x)
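        # GEGLU feed-forward: the first projection is gated by the GELU of the
        # second before being projected back to the model dimension.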
y_a = self.linear1(y)
y_b = self.linear2(y)
y = y_a * nn.gelu(y_b)
y = self.linear3(y)
x = x + y
return x
class Transformer2D(nn.Module):
"""A transformer model for inputs with 2 spatial dimensions."""
def __init__(
self,
in_channels: int,
model_dims: int,
encoder_dims: int,
num_heads: int,
num_layers: int = 1,
norm_num_groups: int = 32,
):
super().__init__()
self.norm = nn.GroupNorm(norm_num_groups, in_channels, pytorch_compatible=True)
self.proj_in = nn.Linear(in_channels, model_dims)
self.transformer_blocks = [
TransformerBlock(model_dims, num_heads, memory_dims=encoder_dims)
for i in range(num_layers)
]
self.proj_out = nn.Linear(model_dims, in_channels)
def __call__(self, x, encoder_x, attn_mask, encoder_attn_mask):
# Save the input to add to the output
input_x = x
dtype = x.dtype
# Perform the input norm and projection
B, H, W, C = x.shape
x = self.norm(x).reshape(B, -1, C)
x = self.proj_in(x)
# Apply the transformer
for block in self.transformer_blocks:
x = block(x, encoder_x, attn_mask, encoder_attn_mask)
# Apply the output projection and reshape
x = self.proj_out(x)
x = x.reshape(B, H, W, C)
return x + input_x
class ResnetBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: Optional[int] = None,
groups: int = 32,
temb_channels: Optional[int] = None,
):
super().__init__()
out_channels = out_channels or in_channels
self.norm1 = nn.GroupNorm(groups, in_channels, pytorch_compatible=True)
self.conv1 = nn.Conv2d(
in_channels, out_channels, kernel_size=3, stride=1, padding=1
)
if temb_channels is not None:
self.time_emb_proj = nn.Linear(temb_channels, out_channels)
self.norm2 = nn.GroupNorm(groups, out_channels, pytorch_compatible=True)
self.conv2 = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=1, padding=1
)
if in_channels != out_channels:
self.conv_shortcut = nn.Linear(in_channels, out_channels)
def __call__(self, x, temb=None):
dtype = x.dtype
if temb is not None:
temb = self.time_emb_proj(nn.silu(temb))
y = self.norm1(x)
y = nn.silu(y)
y = self.conv1(y)
if temb is not None:
y = y + temb[:, None, None, :]
y = self.norm2(y)
y = nn.silu(y)
y = self.conv2(y)
x = y + (x if "conv_shortcut" not in self else self.conv_shortcut(x))
return x
class UNetBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
prev_out_channels: Optional[int] = None,
num_layers: int = 1,
transformer_layers_per_block: int = 1,
num_attention_heads: int = 8,
cross_attention_dim=1280,
resnet_groups: int = 32,
add_downsample=True,
add_upsample=True,
add_cross_attention=True,
):
super().__init__()
# Prepare the in channels list for the resnets
if prev_out_channels is None:
in_channels_list = [in_channels] + [out_channels] * (num_layers - 1)
else:
in_channels_list = [prev_out_channels] + [out_channels] * (num_layers - 1)
res_channels_list = [out_channels] * (num_layers - 1) + [in_channels]
in_channels_list = [
a + b for a, b in zip(in_channels_list, res_channels_list)
]
# Add resnet blocks that also process the time embedding
self.resnets = [
ResnetBlock2D(
in_channels=ic,
out_channels=out_channels,
temb_channels=temb_channels,
groups=resnet_groups,
)
for ic in in_channels_list
]
# Add optional cross attention layers
if add_cross_attention:
self.attentions = [
Transformer2D(
in_channels=out_channels,
model_dims=out_channels,
num_heads=num_attention_heads,
num_layers=transformer_layers_per_block,
encoder_dims=cross_attention_dim,
)
for i in range(num_layers)
]
# Add an optional downsampling layer
if add_downsample:
self.downsample = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=2, padding=1
)
# or upsampling layer
if add_upsample:
self.upsample = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=1, padding=1
)
def __call__(
self,
x,
encoder_x=None,
temb=None,
attn_mask=None,
encoder_attn_mask=None,
residual_hidden_states=None,
):
output_states = []
for i in range(len(self.resnets)):
if residual_hidden_states is not None:
x = mx.concatenate([x, residual_hidden_states.pop()], axis=-1)
x = self.resnets[i](x, temb)
if "attentions" in self:
x = self.attentions[i](x, encoder_x, attn_mask, encoder_attn_mask)
output_states.append(x)
if "downsample" in self:
x = self.downsample(x)
output_states.append(x)
if "upsample" in self:
x = self.upsample(upsample_nearest(x))
output_states.append(x)
return x, output_states
class UNetModel(nn.Module):
"""The conditional 2D UNet model that actually performs the denoising."""
def __init__(self, config: UNetConfig):
super().__init__()
self.conv_in = nn.Conv2d(
config.in_channels,
config.block_out_channels[0],
config.conv_in_kernel,
padding=(config.conv_in_kernel - 1) // 2,
)
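        # Sinusoidal timestep features; min_freq = 10000^(-(D - 2) / D) reproduces
        # the classic transformer frequency spacing for D = block_out_channels[0].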
self.timesteps = nn.SinusoidalPositionalEncoding(
config.block_out_channels[0],
max_freq=1,
min_freq=math.exp(
-math.log(10000) + 2 * math.log(10000) / config.block_out_channels[0]
),
scale=1.0,
cos_first=True,
full_turns=False,
)
self.time_embedding = TimestepEmbedding(
config.block_out_channels[0],
config.block_out_channels[0] * 4,
)
if config.addition_embed_type == "text_time":
self.add_time_proj = nn.SinusoidalPositionalEncoding(
config.addition_time_embed_dim,
max_freq=1,
min_freq=math.exp(
-math.log(10000)
+ 2 * math.log(10000) / config.addition_time_embed_dim
),
scale=1.0,
cos_first=True,
full_turns=False,
)
self.add_embedding = TimestepEmbedding(
config.projection_class_embeddings_input_dim,
config.block_out_channels[0] * 4,
)
# Make the downsampling blocks
block_channels = [config.block_out_channels[0]] + list(
config.block_out_channels
)
self.down_blocks = [
UNetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=config.block_out_channels[0] * 4,
num_layers=config.layers_per_block[i],
transformer_layers_per_block=config.transformer_layers_per_block[i],
num_attention_heads=config.num_attention_heads[i],
cross_attention_dim=config.cross_attention_dim[i],
resnet_groups=config.norm_num_groups,
add_downsample=(i < len(config.block_out_channels) - 1),
add_upsample=False,
add_cross_attention="CrossAttn" in config.down_block_types[i],
)
for i, (in_channels, out_channels) in enumerate(
zip(block_channels, block_channels[1:])
)
]
# Make the middle block
self.mid_blocks = [
ResnetBlock2D(
in_channels=config.block_out_channels[-1],
out_channels=config.block_out_channels[-1],
temb_channels=config.block_out_channels[0] * 4,
groups=config.norm_num_groups,
),
Transformer2D(
in_channels=config.block_out_channels[-1],
model_dims=config.block_out_channels[-1],
num_heads=config.num_attention_heads[-1],
num_layers=config.transformer_layers_per_block[-1],
encoder_dims=config.cross_attention_dim[-1],
),
ResnetBlock2D(
in_channels=config.block_out_channels[-1],
out_channels=config.block_out_channels[-1],
temb_channels=config.block_out_channels[0] * 4,
groups=config.norm_num_groups,
),
]
# Make the upsampling blocks
block_channels = (
[config.block_out_channels[0]]
+ list(config.block_out_channels)
+ [config.block_out_channels[-1]]
)
self.up_blocks = [
UNetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=config.block_out_channels[0] * 4,
prev_out_channels=prev_out_channels,
num_layers=config.layers_per_block[i] + 1,
transformer_layers_per_block=config.transformer_layers_per_block[i],
num_attention_heads=config.num_attention_heads[i],
cross_attention_dim=config.cross_attention_dim[i],
resnet_groups=config.norm_num_groups,
add_downsample=False,
add_upsample=(i > 0),
add_cross_attention="CrossAttn" in config.up_block_types[i],
)
for i, (in_channels, out_channels, prev_out_channels) in reversed(
list(
enumerate(
zip(block_channels, block_channels[1:], block_channels[2:])
)
)
)
]
self.conv_norm_out = nn.GroupNorm(
config.norm_num_groups,
config.block_out_channels[0],
pytorch_compatible=True,
)
self.conv_out = nn.Conv2d(
config.block_out_channels[0],
config.out_channels,
config.conv_out_kernel,
padding=(config.conv_out_kernel - 1) // 2,
)
def __call__(
self,
x,
timestep,
encoder_x,
attn_mask=None,
encoder_attn_mask=None,
text_time=None,
):
# Compute the time embeddings
temb = self.timesteps(timestep).astype(x.dtype)
temb = self.time_embedding(temb)
# Add the extra text_time conditioning
if text_time is not None:
text_emb, time_ids = text_time
emb = self.add_time_proj(time_ids).flatten(1).astype(x.dtype)
emb = mx.concatenate([text_emb, emb], axis=-1)
emb = self.add_embedding(emb)
temb = temb + emb
# Preprocess the input
x = self.conv_in(x)
# Run the downsampling part of the unet
residuals = [x]
for block in self.down_blocks:
x, res = block(
x,
encoder_x=encoder_x,
temb=temb,
attn_mask=attn_mask,
encoder_attn_mask=encoder_attn_mask,
)
residuals.extend(res)
# Run the middle part of the unet
x = self.mid_blocks[0](x, temb)
x = self.mid_blocks[1](x, encoder_x, attn_mask, encoder_attn_mask)
x = self.mid_blocks[2](x, temb)
# Run the upsampling part of the unet
for block in self.up_blocks:
x, _ = block(
x,
encoder_x=encoder_x,
temb=temb,
attn_mask=attn_mask,
encoder_attn_mask=encoder_attn_mask,
residual_hidden_states=residuals,
)
# Postprocess the output
x = self.conv_norm_out(x)
x = nn.silu(x)
x = self.conv_out(x)
return x
|
// Copyright © 2024 Apple Inc.
import Foundation
import MLX
import MLXNN
// port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/unet.py
func upsampleNearest(_ x: MLXArray, scale: Int = 2) -> MLXArray {
precondition(x.ndim == 4)
let (B, H, W, C) = x.shape4
var x = broadcast(
x[0..., 0..., .newAxis, 0..., .newAxis, 0...], to: [B, H, scale, W, scale, C])
x = x.reshaped(B, H * scale, W * scale, C)
return x
}
class TimestepEmbedding: Module, UnaryLayer {
@ModuleInfo(key: "linear_1") var linear1: Linear
@ModuleInfo(key: "linear_2") var linear2: Linear
init(inputChannels: Int, timeEmbedDimensions: Int) {
self._linear1.wrappedValue = Linear(inputChannels, timeEmbedDimensions)
self._linear2.wrappedValue = Linear(timeEmbedDimensions, timeEmbedDimensions)
}
func callAsFunction(_ x: MLXArray) -> MLXArray {
var x = linear1(x)
x = silu(x)
x = linear2(x)
return x
}
}
class TransformerBlock: Module {
let norm1: LayerNorm
let attn1: MultiHeadAttention
let norm2: LayerNorm
let attn2: MultiHeadAttention
let norm3: LayerNorm
@ModuleInfo var linear1: Linear
@ModuleInfo var linear2: Linear
@ModuleInfo var linear3: Linear
init(
modelDimensions: Int, numHeads: Int, hiddenDimensions: Int? = nil,
memoryDimensions: Int? = nil
) {
norm1 = LayerNorm(dimensions: modelDimensions)
attn1 = MultiHeadAttention(dimensions: modelDimensions, numHeads: numHeads)
        // The Python code does `self.attn1.out_proj.bias = mx.zeros(model_dims)` to
        // enable the bias on one of the four Linears attached to attn1. Since the bias
        // is nil here it can't be updated in place, so just replace the layer.
attn1.update(
modules: ModuleChildren(
values: ["out_proj": .value(Linear(modelDimensions, modelDimensions, bias: true))]))
let memoryDimensions = memoryDimensions ?? modelDimensions
self.norm2 = LayerNorm(dimensions: modelDimensions)
self.attn2 = MultiHeadAttention(
dimensions: modelDimensions, numHeads: numHeads, keyInputDimensions: memoryDimensions)
attn2.update(
modules: ModuleChildren(
values: ["out_proj": .value(Linear(modelDimensions, modelDimensions, bias: true))]))
let hiddenDimensions = hiddenDimensions ?? (4 * modelDimensions)
self.norm3 = LayerNorm(dimensions: modelDimensions)
self.linear1 = Linear(modelDimensions, hiddenDimensions)
self.linear2 = Linear(modelDimensions, hiddenDimensions)
self.linear3 = Linear(hiddenDimensions, modelDimensions)
}
func callAsFunction(
_ x: MLXArray, memory: MLXArray, attentionMask: MLXArray?, memoryMask: MLXArray?
) -> MLXArray {
var x = x
// self attention
var y = norm1(x)
y = attn1(y, keys: y, values: y, mask: attentionMask)
x = x + y
// cross attention
y = norm2(x)
y = attn2(y, keys: memory, values: memory, mask: memoryMask)
x = x + y
// FFN
y = norm3(x)
let ya = linear1(y)
let yb = linear2(y)
y = ya * gelu(yb)
y = linear3(y)
x = x + y
return x
}
}
/// A transformer model for inputs with 2 spatial dimensions
class Transformer2D: Module {
let norm: GroupNorm
@ModuleInfo(key: "proj_in") var projectIn: Linear
@ModuleInfo(key: "transformer_blocks") var transformerBlocks: [TransformerBlock]
@ModuleInfo(key: "proj_out") var projectOut: Linear
init(
inputChannels: Int, modelDimensions: Int, encoderDimensions: Int, numHeads: Int,
numLayers: Int, groupCount: Int = 32
) {
self.norm = GroupNorm(
groupCount: groupCount, dimensions: inputChannels, pytorchCompatible: true)
self._projectIn.wrappedValue = Linear(inputChannels, modelDimensions)
self._transformerBlocks.wrappedValue = (0 ..< numLayers)
.map { _ in
TransformerBlock(
modelDimensions: modelDimensions, numHeads: numHeads,
memoryDimensions: encoderDimensions)
}
self._projectOut.wrappedValue = Linear(modelDimensions, inputChannels)
}
func callAsFunction(
_ x: MLXArray, encoderX: MLXArray, attentionMask: MLXArray?, encoderAttentionMask: MLXArray?
) -> MLXArray {
let inputX = x
let dtype = x.dtype
var x = x
// Perform the input norm and projection
let (B, H, W, C) = x.shape4
x = norm(x).reshaped(B, -1, C)
x = projectIn(x)
// apply the transformer
for block in transformerBlocks {
x = block(
x, memory: encoderX, attentionMask: attentionMask, memoryMask: encoderAttentionMask)
}
// apply the output projection and reshape
x = projectOut(x)
x = x.reshaped(B, H, W, C)
return x + inputX
}
}
class ResnetBlock2D: Module {
let norm1: GroupNorm
let conv1: Conv2d
@ModuleInfo(key: "time_emb_proj") var timeEmbedProjection: Linear?
let norm2: GroupNorm
let conv2: Conv2d
@ModuleInfo(key: "conv_shortcut") var convolutionShortcut: Linear?
init(
inputChannels: Int, outputChannels: Int? = nil, groupCount: Int = 32,
timeEmbedChannels: Int? = nil
) {
let outputChannels = outputChannels ?? inputChannels
self.norm1 = GroupNorm(
groupCount: groupCount, dimensions: inputChannels, pytorchCompatible: true)
self.conv1 = Conv2d(
inputChannels: inputChannels, outputChannels: outputChannels,
kernelSize: 3, stride: 1, padding: 1)
if let timeEmbedChannels {
self._timeEmbedProjection.wrappedValue = Linear(timeEmbedChannels, outputChannels)
}
self.norm2 = GroupNorm(
groupCount: groupCount, dimensions: outputChannels, pytorchCompatible: true)
self.conv2 = Conv2d(
inputChannels: outputChannels, outputChannels: outputChannels,
kernelSize: 3, stride: 1, padding: 1)
if inputChannels != outputChannels {
self._convolutionShortcut.wrappedValue = Linear(inputChannels, outputChannels)
}
}
func callAsFunction(_ x: MLXArray, timeEmbedding: MLXArray? = nil) -> MLXArray {
let dtype = x.dtype
var y = norm1(x)
y = silu(y)
y = conv1(y)
if var timeEmbedding, let timeEmbedProjection {
timeEmbedding = timeEmbedProjection(silu(timeEmbedding))
y = y + timeEmbedding[0..., .newAxis, .newAxis, 0...]
}
y = norm2(y)
y = silu(y)
y = conv2(y)
if let convolutionShortcut {
return y + convolutionShortcut(x)
} else {
return y + x
}
}
}
class UNetBlock2D: Module {
let resnets: [ResnetBlock2D]
let attentions: [Transformer2D]?
let downsample: Conv2d?
let upsample: Conv2d?
init(
inputChannels: Int, outputChannels: Int, timeEmbedChannels: Int,
previousOutChannels: Int? = nil, numLayers: Int = 1, transformerLayersPerBlock: Int = 1,
numHeads: Int = 8, crossAttentionDimension: Int = 1280, resnetGroups: Int = 32,
addDownSample: Bool = true, addUpSample: Bool = true, addCrossAttention: Bool = true
) {
// Prepare the inputChannelsArray for the resnets
let inputChannelsArray: [Int]
if let previousOutChannels {
let inputChannelsBuild =
[previousOutChannels] + Array(repeating: outputChannels, count: numLayers - 1)
let resChannelsArray =
Array(repeating: outputChannels, count: numLayers - 1) + [inputChannels]
inputChannelsArray = zip(inputChannelsBuild, resChannelsArray).map { $0.0 + $0.1 }
} else {
inputChannelsArray =
[inputChannels] + Array(repeating: outputChannels, count: numLayers - 1)
}
// Add resnet blocks that also process the time embedding
self.resnets =
inputChannelsArray
.map { ic in
ResnetBlock2D(
inputChannels: ic, outputChannels: outputChannels, groupCount: resnetGroups,
timeEmbedChannels: timeEmbedChannels)
}
// Add optional cross attention layers
if addCrossAttention {
self.attentions = (0 ..< numLayers)
.map { _ in
Transformer2D(
inputChannels: outputChannels, modelDimensions: outputChannels,
encoderDimensions: crossAttentionDimension, numHeads: numHeads,
numLayers: transformerLayersPerBlock)
}
} else {
self.attentions = nil
}
// Add an optional downsampling layer
if addDownSample {
self.downsample = Conv2d(
inputChannels: outputChannels, outputChannels: outputChannels, kernelSize: 3,
stride: 2, padding: 1)
} else {
self.downsample = nil
}
// or upsampling layer
if addUpSample {
self.upsample = Conv2d(
inputChannels: outputChannels, outputChannels: outputChannels, kernelSize: 3,
stride: 1, padding: 1)
} else {
self.upsample = nil
}
}
func callAsFunction(
_ x: MLXArray, encoderX: MLXArray, timeEmbedding: MLXArray? = nil,
attentionMask: MLXArray? = nil, encoderAttentionMask: MLXArray? = nil,
residualHiddenStates: [MLXArray]? = nil
) -> (MLXArray, [MLXArray], [MLXArray]) {
var x = x
var outputStates = [MLXArray]()
var residualHiddenStates = residualHiddenStates
for i in 0 ..< resnets.count {
if residualHiddenStates != nil {
x = concatenated([x, residualHiddenStates!.removeLast()], axis: -1)
}
x = resnets[i](x, timeEmbedding: timeEmbedding)
if let attentions {
x = attentions[i](
x, encoderX: encoderX, attentionMask: attentionMask,
encoderAttentionMask: encoderAttentionMask)
}
outputStates.append(x)
}
if let downsample {
x = downsample(x)
outputStates.append(x)
}
if let upsample {
x = upsample(upsampleNearest(x))
outputStates.append(x)
}
if let residualHiddenStates {
return (x, outputStates, residualHiddenStates)
} else {
return (x, outputStates, [])
}
}
}
class UNetModel: Module {
@ModuleInfo(key: "conv_in") var convIn: Conv2d
let timesteps: SinusoidalPositionalEncoding
@ModuleInfo(key: "time_embedding") var timeEmbedding: TimestepEmbedding
@ModuleInfo(key: "addition_embed_type") var addTimeProj: SinusoidalPositionalEncoding?
@ModuleInfo(key: "add_embedding") var addEmbedding: TimestepEmbedding?
@ModuleInfo(key: "down_blocks") var downBlocks: [UNetBlock2D]
@ModuleInfo(key: "mid_blocks") var midBlocks: (ResnetBlock2D, Transformer2D, ResnetBlock2D)
@ModuleInfo(key: "up_blocks") var upBlocks: [UNetBlock2D]
@ModuleInfo(key: "conv_norm_out") var convNormOut: GroupNorm
@ModuleInfo(key: "conv_out") var convOut: Conv2d
init(configuration: UNetConfiguration) {
let channels0 = configuration.blockOutChannels[0]
self._convIn.wrappedValue = Conv2d(
inputChannels: configuration.inputChannels, outputChannels: channels0,
kernelSize: .init(configuration.convolutionInKernel),
padding: .init((configuration.convolutionInKernel - 1) / 2))
self.timesteps = SinusoidalPositionalEncoding(
dimensions: channels0,
minFrequency: exp(-log(10_000) + 2 * log(10_000) / Float(channels0)),
maxFrequency: 1, scale: 1, cosineFirst: true, fullTurns: false)
self._timeEmbedding.wrappedValue = TimestepEmbedding(
inputChannels: channels0, timeEmbedDimensions: channels0 * 4)
if configuration.additionEmbedType == "text_time",
let additionTimeEmbedDimension = configuration.additionTimeEmbedDimension,
let projectionClassEmbeddingsInputDimension = configuration
.projectionClassEmbeddingsInputDimension
{
self._addTimeProj.wrappedValue = SinusoidalPositionalEncoding(
dimensions: additionTimeEmbedDimension,
minFrequency: exp(
-log(10_000) + 2 * log(10_000) / Float(additionTimeEmbedDimension)),
maxFrequency: 1,
scale: 1, cosineFirst: true, fullTurns: false)
self._addEmbedding.wrappedValue = TimestepEmbedding(
inputChannels: projectionClassEmbeddingsInputDimension,
timeEmbedDimensions: channels0 * 4)
}
// make the downsampling blocks
let downblockChannels = [channels0] + configuration.blockOutChannels
self._downBlocks.wrappedValue = zip(downblockChannels, downblockChannels.dropFirst())
.enumerated()
.map { (i, pair) in
let (inChannels, outChannels) = pair
return UNetBlock2D(
inputChannels: inChannels,
outputChannels: outChannels,
timeEmbedChannels: channels0 * 4,
numLayers: configuration.layersPerBlock[i],
transformerLayersPerBlock: configuration.transformerLayersPerBlock[i],
numHeads: configuration.numHeads[i],
crossAttentionDimension: configuration.crossAttentionDimension[i],
resnetGroups: configuration.normNumGroups,
addDownSample: i < configuration.blockOutChannels.count - 1,
addUpSample: false,
addCrossAttention: configuration.downBlockTypes[i].contains("CrossAttn")
)
}
// make the middle block
let channelsLast = configuration.blockOutChannels.last!
self._midBlocks.wrappedValue = (
ResnetBlock2D(
inputChannels: channelsLast,
outputChannels: channelsLast,
groupCount: configuration.normNumGroups,
timeEmbedChannels: channels0 * 4
),
Transformer2D(
inputChannels: channelsLast,
modelDimensions: channelsLast,
encoderDimensions: configuration.crossAttentionDimension.last!,
numHeads: configuration.numHeads.last!,
numLayers: configuration.transformerLayersPerBlock.last!
),
ResnetBlock2D(
inputChannels: channelsLast,
outputChannels: channelsLast,
groupCount: configuration.normNumGroups,
timeEmbedChannels: channels0 * 4
)
)
// make the upsampling blocks
let upblockChannels =
[channels0] + configuration.blockOutChannels + [configuration.blockOutChannels.last!]
self._upBlocks.wrappedValue =
zip(upblockChannels, zip(upblockChannels.dropFirst(), upblockChannels.dropFirst(2)))
.enumerated()
.reversed()
.map { (i, triple) in
let (inChannels, (outChannels, prevOutChannels)) = triple
return UNetBlock2D(
inputChannels: inChannels,
outputChannels: outChannels,
timeEmbedChannels: channels0 * 4,
previousOutChannels: prevOutChannels,
numLayers: configuration.layersPerBlock[i] + 1,
transformerLayersPerBlock: configuration.transformerLayersPerBlock[i],
numHeads: configuration.numHeads[i],
crossAttentionDimension: configuration.crossAttentionDimension[i],
resnetGroups: configuration.normNumGroups,
addDownSample: false,
addUpSample: i > 0,
addCrossAttention: configuration.upBlockTypes[i].contains("CrossAttn")
)
}
self._convNormOut.wrappedValue = GroupNorm(
groupCount: configuration.normNumGroups, dimensions: channels0, pytorchCompatible: true)
self._convOut.wrappedValue = Conv2d(
inputChannels: channels0, outputChannels: configuration.outputChannels,
kernelSize: .init(configuration.convolutionOutKernel),
padding: .init((configuration.convolutionOutKernel - 1) / 2))
}
func callAsFunction(
_ x: MLXArray, timestep: MLXArray, encoderX: MLXArray, attentionMask: MLXArray? = nil,
encoderAttentionMask: MLXArray? = nil, textTime: (MLXArray, MLXArray)? = nil
) -> MLXArray {
// compute the time embeddings
var temb = timesteps(timestep).asType(x.dtype)
temb = timeEmbedding(temb)
// add the extra textTime conditioning
if let (textEmbedding, timeIds) = textTime,
let addTimeProj, let addEmbedding
{
var emb = addTimeProj(timeIds).flattened(start: 1).asType(x.dtype)
emb = concatenated([textEmbedding, emb], axis: -1)
emb = addEmbedding(emb)
temb = temb + emb
}
// preprocess the input
var x = convIn(x)
// run the downsampling part of the unet
var residuals = [x]
for block in self.downBlocks {
let res: [MLXArray]
(x, res, _) = block(
x, encoderX: encoderX, timeEmbedding: temb, attentionMask: attentionMask,
encoderAttentionMask: encoderAttentionMask)
residuals.append(contentsOf: res)
}
// run the middle part of the unet
x = midBlocks.0(x, timeEmbedding: temb)
x = midBlocks.1(
x, encoderX: encoderX, attentionMask: attentionMask,
encoderAttentionMask: encoderAttentionMask)
x = midBlocks.2(x, timeEmbedding: temb)
// run the upsampling part of the unet
for block in self.upBlocks {
(x, _, residuals) = block(
x, encoderX: encoderX, timeEmbedding: temb, attentionMask: attentionMask,
encoderAttentionMask: encoderAttentionMask, residualHiddenStates: residuals)
}
// postprocess the output
let dtype = x.dtype
x = convNormOut(x)
x = silu(x)
x = convOut(x)
return x
}
}
|
StableDiffusion
|
VAE
|
# Copyright © 2023 Apple Inc.
import math
from typing import List
import mlx.core as mx
import mlx.nn as nn
from .config import AutoencoderConfig
from .unet import ResnetBlock2D, upsample_nearest
class Attention(nn.Module):
"""A single head unmasked attention for use with the VAE."""
def __init__(self, dims: int, norm_groups: int = 32):
super().__init__()
self.group_norm = nn.GroupNorm(norm_groups, dims, pytorch_compatible=True)
self.query_proj = nn.Linear(dims, dims)
self.key_proj = nn.Linear(dims, dims)
self.value_proj = nn.Linear(dims, dims)
self.out_proj = nn.Linear(dims, dims)
def __call__(self, x):
B, H, W, C = x.shape
y = self.group_norm(x)
queries = self.query_proj(y).reshape(B, H * W, C)
keys = self.key_proj(y).reshape(B, H * W, C)
values = self.value_proj(y).reshape(B, H * W, C)
scale = 1 / math.sqrt(queries.shape[-1])
scores = (queries * scale) @ keys.transpose(0, 2, 1)
attn = mx.softmax(scores, axis=-1)
y = (attn @ values).reshape(B, H, W, C)
y = self.out_proj(y)
x = x + y
return x
class EncoderDecoderBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
num_layers: int = 1,
resnet_groups: int = 32,
add_downsample=True,
add_upsample=True,
):
super().__init__()
# Add the resnet blocks
self.resnets = [
ResnetBlock2D(
in_channels=in_channels if i == 0 else out_channels,
out_channels=out_channels,
groups=resnet_groups,
)
for i in range(num_layers)
]
# Add an optional downsampling layer
if add_downsample:
self.downsample = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=2, padding=0
)
# or upsampling layer
if add_upsample:
self.upsample = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=1, padding=1
)
def __call__(self, x):
for resnet in self.resnets:
x = resnet(x)
if "downsample" in self:
x = mx.pad(x, [(0, 0), (0, 1), (0, 1), (0, 0)])
x = self.downsample(x)
if "upsample" in self:
x = self.upsample(upsample_nearest(x))
return x
class Encoder(nn.Module):
"""Implements the encoder side of the Autoencoder."""
def __init__(
self,
in_channels: int,
out_channels: int,
block_out_channels: List[int] = [64],
layers_per_block: int = 2,
resnet_groups: int = 32,
):
super().__init__()
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1
)
channels = [block_out_channels[0]] + list(block_out_channels)
self.down_blocks = [
EncoderDecoderBlock2D(
in_channels,
out_channels,
num_layers=layers_per_block,
resnet_groups=resnet_groups,
add_downsample=i < len(block_out_channels) - 1,
add_upsample=False,
)
for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:]))
]
self.mid_blocks = [
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
Attention(block_out_channels[-1], resnet_groups),
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
]
self.conv_norm_out = nn.GroupNorm(
resnet_groups, block_out_channels[-1], pytorch_compatible=True
)
self.conv_out = nn.Conv2d(block_out_channels[-1], out_channels, 3, padding=1)
def __call__(self, x):
x = self.conv_in(x)
for l in self.down_blocks:
x = l(x)
x = self.mid_blocks[0](x)
x = self.mid_blocks[1](x)
x = self.mid_blocks[2](x)
x = self.conv_norm_out(x)
x = nn.silu(x)
x = self.conv_out(x)
return x
class Decoder(nn.Module):
"""Implements the decoder side of the Autoencoder."""
def __init__(
self,
in_channels: int,
out_channels: int,
block_out_channels: List[int] = [64],
layers_per_block: int = 2,
resnet_groups: int = 32,
):
super().__init__()
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1
)
self.mid_blocks = [
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
Attention(block_out_channels[-1], resnet_groups),
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
]
channels = list(reversed(block_out_channels))
channels = [channels[0]] + channels
self.up_blocks = [
EncoderDecoderBlock2D(
in_channels,
out_channels,
num_layers=layers_per_block,
resnet_groups=resnet_groups,
add_downsample=False,
add_upsample=i < len(block_out_channels) - 1,
)
for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:]))
]
self.conv_norm_out = nn.GroupNorm(
resnet_groups, block_out_channels[0], pytorch_compatible=True
)
self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
def __call__(self, x):
x = self.conv_in(x)
x = self.mid_blocks[0](x)
x = self.mid_blocks[1](x)
x = self.mid_blocks[2](x)
for l in self.up_blocks:
x = l(x)
x = self.conv_norm_out(x)
x = nn.silu(x)
x = self.conv_out(x)
return x
class Autoencoder(nn.Module):
"""The autoencoder that allows us to perform diffusion in the latent space."""
def __init__(self, config: AutoencoderConfig):
super().__init__()
self.latent_channels = config.latent_channels_in
self.scaling_factor = config.scaling_factor
self.encoder = Encoder(
config.in_channels,
config.latent_channels_out,
config.block_out_channels,
config.layers_per_block,
resnet_groups=config.norm_num_groups,
)
self.decoder = Decoder(
config.latent_channels_in,
config.out_channels,
config.block_out_channels,
config.layers_per_block + 1,
resnet_groups=config.norm_num_groups,
)
self.quant_proj = nn.Linear(
config.latent_channels_out, config.latent_channels_out
)
self.post_quant_proj = nn.Linear(
config.latent_channels_in, config.latent_channels_in
)
def decode(self, z):
z = z / self.scaling_factor
return self.decoder(self.post_quant_proj(z))
def encode(self, x):
x = self.encoder(x)
x = self.quant_proj(x)
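        # The encoder emits 2 * latent_channels features: split them into the
        # mean and log-variance of the latent Gaussian.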
mean, logvar = x.split(2, axis=-1)
mean = mean * self.scaling_factor
logvar = logvar + 2 * math.log(self.scaling_factor)
return mean, logvar
def __call__(self, x, key=None):
mean, logvar = self.encode(x)
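        # Reparameterization trick: z = mean + exp(0.5 * logvar) * eps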
z = mx.random.normal(mean.shape, key=key) * mx.exp(0.5 * logvar) + mean
x_hat = self.decode(z)
return dict(x_hat=x_hat, z=z, mean=mean, logvar=logvar)
|
// Copyright © 2024 Apple Inc.
import Foundation
import MLX
import MLXNN
// port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/vae.py
class Attention: Module, UnaryLayer {
@ModuleInfo(key: "group_norm") public var groupNorm: GroupNorm
@ModuleInfo(key: "query_proj") public var queryProjection: Linear
@ModuleInfo(key: "key_proj") public var keyProjection: Linear
@ModuleInfo(key: "value_proj") public var valueProjection: Linear
@ModuleInfo(key: "out_proj") public var outProjection: Linear
init(dimensions: Int, groupCount: Int = 32) {
self._groupNorm.wrappedValue = GroupNorm(
groupCount: groupCount, dimensions: dimensions, pytorchCompatible: true)
self._queryProjection.wrappedValue = Linear(dimensions, dimensions)
self._keyProjection.wrappedValue = Linear(dimensions, dimensions)
self._valueProjection.wrappedValue = Linear(dimensions, dimensions)
self._outProjection.wrappedValue = Linear(dimensions, dimensions)
}
func callAsFunction(_ x: MLXArray) -> MLXArray {
let (B, H, W, C) = x.shape4
var y = groupNorm(x)
let queries = queryProjection(y).reshaped(B, H * W, C)
let keys = keyProjection(y).reshaped(B, H * W, C)
let values = valueProjection(y).reshaped(B, H * W, C)
let scale = 1 / sqrt(Float(queries.dim(-1)))
let scores = (queries * scale).matmul(keys.transposed(0, 2, 1))
let attention = softmax(scores, axis: -1)
y = matmul(attention, values).reshaped(B, H, W, C)
y = outProjection(y)
return x + y
}
}
class EncoderDecoderBlock2D: Module, UnaryLayer {
let resnets: [ResnetBlock2D]
let downsample: Conv2d?
let upsample: Conv2d?
init(
inputChannels: Int, outputChannels: Int, numLayers: Int = 1, resnetGroups: Int = 32,
addDownSample: Bool = true, addUpSample: Bool = true
) {
// Add the resnet blocks
self.resnets = (0 ..< numLayers)
.map { i in
ResnetBlock2D(
inputChannels: i == 0 ? inputChannels : outputChannels,
outputChannels: outputChannels,
groupCount: resnetGroups)
}
// Add an optional downsampling layer
if addDownSample {
self.downsample = Conv2d(
inputChannels: outputChannels, outputChannels: outputChannels, kernelSize: 3,
stride: 2, padding: 0)
} else {
self.downsample = nil
}
// or upsampling layer
if addUpSample {
self.upsample = Conv2d(
inputChannels: outputChannels, outputChannels: outputChannels, kernelSize: 3,
stride: 1, padding: 1)
} else {
self.upsample = nil
}
}
func callAsFunction(_ x: MLXArray) -> MLXArray {
var x = x
for resnet in resnets {
x = resnet(x)
}
if let downsample {
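            // Pad bottom/right only so the stride-2 convolution (padding: 0)
            // halves the spatial dimensions, mirroring the Python implementation above.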
x = padded(x, widths: [[0, 0], [0, 1], [0, 1], [0, 0]])
x = downsample(x)
}
if let upsample {
x = upsample(upsampleNearest(x))
}
return x
}
}
/// Implements the encoder side of the Autoencoder
class VAEncoder: Module, UnaryLayer {
@ModuleInfo(key: "conv_in") var convIn: Conv2d
@ModuleInfo(key: "down_blocks") var downBlocks: [EncoderDecoderBlock2D]
@ModuleInfo(key: "mid_blocks") var midBlocks: (ResnetBlock2D, Attention, ResnetBlock2D)
@ModuleInfo(key: "conv_norm_out") var convNormOut: GroupNorm
@ModuleInfo(key: "conv_out") var convOut: Conv2d
init(
inputChannels: Int, outputChannels: Int, blockOutChannels: [Int] = [64],
layersPerBlock: Int = 2, resnetGroups: Int = 32
) {
let channels0 = blockOutChannels[0]
self._convIn.wrappedValue = Conv2d(
inputChannels: inputChannels, outputChannels: channels0, kernelSize: 3, stride: 1,
padding: 1)
let downblockChannels = [channels0] + blockOutChannels
self._downBlocks.wrappedValue = zip(downblockChannels, downblockChannels.dropFirst())
.enumerated()
.map { (i, pair) in
let (inChannels, outChannels) = pair
return EncoderDecoderBlock2D(
inputChannels: inChannels, outputChannels: outChannels,
numLayers: layersPerBlock, resnetGroups: resnetGroups,
addDownSample: i < blockOutChannels.count - 1,
addUpSample: false
)
}
let channelsLast = blockOutChannels.last!
self._midBlocks.wrappedValue = (
ResnetBlock2D(
inputChannels: channelsLast,
outputChannels: channelsLast,
groupCount: resnetGroups
),
Attention(dimensions: channelsLast, groupCount: resnetGroups),
ResnetBlock2D(
inputChannels: channelsLast,
outputChannels: channelsLast,
groupCount: resnetGroups
)
)
self._convNormOut.wrappedValue = GroupNorm(
groupCount: resnetGroups, dimensions: channelsLast, pytorchCompatible: true)
self._convOut.wrappedValue = Conv2d(
inputChannels: channelsLast, outputChannels: outputChannels,
kernelSize: 3,
padding: 1)
}
func callAsFunction(_ x: MLXArray) -> MLXArray {
var x = convIn(x)
for l in downBlocks {
x = l(x)
}
x = midBlocks.0(x)
x = midBlocks.1(x)
x = midBlocks.2(x)
x = convNormOut(x)
x = silu(x)
x = convOut(x)
return x
}
}
/// Implements the decoder side of the Autoencoder
class VADecoder: Module, UnaryLayer {
@ModuleInfo(key: "conv_in") var convIn: Conv2d
@ModuleInfo(key: "mid_blocks") var midBlocks: (ResnetBlock2D, Attention, ResnetBlock2D)
@ModuleInfo(key: "up_blocks") var upBlocks: [EncoderDecoderBlock2D]
@ModuleInfo(key: "conv_norm_out") var convNormOut: GroupNorm
@ModuleInfo(key: "conv_out") var convOut: Conv2d
init(
inputChannels: Int, outputChannels: Int, blockOutChannels: [Int] = [64],
layersPerBlock: Int = 2, resnetGroups: Int = 32
) {
let channels0 = blockOutChannels[0]
let channelsLast = blockOutChannels.last!
self._convIn.wrappedValue = Conv2d(
inputChannels: inputChannels, outputChannels: channelsLast, kernelSize: 3, stride: 1,
padding: 1)
self._midBlocks.wrappedValue = (
ResnetBlock2D(
inputChannels: channelsLast,
outputChannels: channelsLast,
groupCount: resnetGroups
),
Attention(dimensions: channelsLast, groupCount: resnetGroups),
ResnetBlock2D(
inputChannels: channelsLast,
outputChannels: channelsLast,
groupCount: resnetGroups
)
)
let channels = [channelsLast] + blockOutChannels.reversed()
self._upBlocks.wrappedValue = zip(channels, channels.dropFirst())
.enumerated()
.map { (i, pair) in
let (inChannels, outChannels) = pair
return EncoderDecoderBlock2D(
inputChannels: inChannels,
outputChannels: outChannels,
numLayers: layersPerBlock,
resnetGroups: resnetGroups,
addDownSample: false,
addUpSample: i < blockOutChannels.count - 1
)
}
self._convNormOut.wrappedValue = GroupNorm(
groupCount: resnetGroups, dimensions: channels0, pytorchCompatible: true)
self._convOut.wrappedValue = Conv2d(
inputChannels: channels0, outputChannels: outputChannels,
kernelSize: 3,
padding: 1)
}
func callAsFunction(_ x: MLXArray) -> MLXArray {
var x = convIn(x)
x = midBlocks.0(x)
x = midBlocks.1(x)
x = midBlocks.2(x)
for l in upBlocks {
x = l(x)
}
x = convNormOut(x)
x = silu(x)
x = convOut(x)
return x
}
}
/// The autoencoder that allows us to perform diffusion in the latent space
class Autoencoder: Module {
let latentChannels: Int
let scalingFactor: Float
let encoder: VAEncoder
let decoder: VADecoder
@ModuleInfo(key: "quant_proj") public var quantProjection: Linear
@ModuleInfo(key: "post_quant_proj") public var postQuantProjection: Linear
init(configuration: AutoencoderConfiguration) {
self.latentChannels = configuration.latentChannelsIn
self.scalingFactor = configuration.scalingFactor
self.encoder = VAEncoder(
inputChannels: configuration.inputChannels,
outputChannels: configuration.latentChannelsOut,
blockOutChannels: configuration.blockOutChannels,
layersPerBlock: configuration.layersPerBlock,
resnetGroups: configuration.normNumGroups)
self.decoder = VADecoder(
inputChannels: configuration.latentChannelsIn,
outputChannels: configuration.outputChannels,
blockOutChannels: configuration.blockOutChannels,
layersPerBlock: configuration.layersPerBlock + 1,
resnetGroups: configuration.normNumGroups)
self._quantProjection.wrappedValue = Linear(
configuration.latentChannelsIn, configuration.latentChannelsOut)
self._postQuantProjection.wrappedValue = Linear(
configuration.latentChannelsIn, configuration.latentChannelsIn)
}
func decode(_ z: MLXArray) -> MLXArray {
let z = z / scalingFactor
return decoder(postQuantProjection(z))
}
func encode(_ x: MLXArray) -> (MLXArray, MLXArray) {
var x = encoder(x)
x = quantProjection(x)
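        // Split the 2 * latentChannels features into the mean and log-variance
        // of the latent Gaussian.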
var (mean, logvar) = x.split(axis: -1)
mean = mean * scalingFactor
logvar = logvar + 2 * log(scalingFactor)
return (mean, logvar)
}
struct Result {
let xHat: MLXArray
let z: MLXArray
let mean: MLXArray
let logvar: MLXArray
}
func callAsFunction(_ x: MLXArray, key: MLXArray? = nil) -> Result {
let (mean, logvar) = encode(x)
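        // Reparameterization trick: z = mean + exp(0.5 * logvar) * eps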
let z = MLXRandom.normal(mean.shape, key: key) * exp(0.5 * logvar) + mean
let xHat = decode(z)
return Result(xHat: xHat, z: z, mean: mean, logvar: logvar)
}
}
| null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null |
StableDiffusion
|
CLIP
|
# Copyright © 2023-2024 Apple Inc.
from dataclasses import dataclass
from typing import List, Optional
import mlx.core as mx
import mlx.nn as nn
from .config import CLIPTextModelConfig
_ACTIVATIONS = {"quick_gelu": nn.gelu_fast_approx, "gelu": nn.gelu}
@dataclass
class CLIPOutput:
# The last_hidden_state indexed at the EOS token and possibly projected if
# the model has a projection layer
pooled_output: Optional[mx.array] = None
# The full sequence output of the transformer after the final layernorm
last_hidden_state: Optional[mx.array] = None
# A list of hidden states corresponding to the outputs of the transformer layers
hidden_states: Optional[List[mx.array]] = None
class CLIPEncoderLayer(nn.Module):
"""The transformer encoder layer from CLIP."""
def __init__(self, model_dims: int, num_heads: int, activation: str):
super().__init__()
self.layer_norm1 = nn.LayerNorm(model_dims)
self.layer_norm2 = nn.LayerNorm(model_dims)
self.attention = nn.MultiHeadAttention(model_dims, num_heads)
# Add biases to the attention projections to match CLIP
self.attention.query_proj.bias = mx.zeros(model_dims)
self.attention.key_proj.bias = mx.zeros(model_dims)
self.attention.value_proj.bias = mx.zeros(model_dims)
self.attention.out_proj.bias = mx.zeros(model_dims)
self.linear1 = nn.Linear(model_dims, 4 * model_dims)
self.linear2 = nn.Linear(4 * model_dims, model_dims)
self.act = _ACTIVATIONS[activation]
def __call__(self, x, attn_mask=None):
y = self.layer_norm1(x)
y = self.attention(y, y, y, attn_mask)
x = y + x
y = self.layer_norm2(x)
y = self.linear1(y)
y = self.act(y)
y = self.linear2(y)
x = y + x
return x
class CLIPTextModel(nn.Module):
"""Implements the text encoder transformer from CLIP."""
def __init__(self, config: CLIPTextModelConfig):
super().__init__()
self.token_embedding = nn.Embedding(config.vocab_size, config.model_dims)
self.position_embedding = nn.Embedding(config.max_length, config.model_dims)
self.layers = [
CLIPEncoderLayer(config.model_dims, config.num_heads, config.hidden_act)
for i in range(config.num_layers)
]
self.final_layer_norm = nn.LayerNorm(config.model_dims)
if config.projection_dim is not None:
self.text_projection = nn.Linear(
config.model_dims, config.projection_dim, bias=False
)
def _get_mask(self, N, dtype):
indices = mx.arange(N)
mask = indices[:, None] < indices[None]
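        # Additive causal mask: a large negative value (kept finite so it stays
        # representable in float16) blocks attention to future tokens.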
mask = mask.astype(dtype) * (-6e4 if dtype == mx.float16 else -1e9)
return mask
def __call__(self, x):
# Extract some shapes
B, N = x.shape
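        # In CLIP the EOS token has the largest id in the vocabulary, so argmax
        # over the token ids recovers its position for pooling.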
eos_tokens = x.argmax(-1)
# Compute the embeddings
x = self.token_embedding(x)
x = x + self.position_embedding.weight[:N]
# Compute the features from the transformer
mask = self._get_mask(N, x.dtype)
hidden_states = []
for l in self.layers:
x = l(x, mask)
hidden_states.append(x)
# Apply the final layernorm and return
x = self.final_layer_norm(x)
last_hidden_state = x
# Select the EOS token
pooled_output = x[mx.arange(len(x)), eos_tokens]
if "text_projection" in self:
pooled_output = self.text_projection(pooled_output)
return CLIPOutput(
pooled_output=pooled_output,
last_hidden_state=last_hidden_state,
hidden_states=hidden_states,
)
|
// Copyright © 2024 Apple Inc.
import Foundation
import MLX
import MLXNN
// port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/clip.py
struct CLIPOutput {
/// The lastHiddenState indexed at the EOS token and possibly projected if
/// the model has a projection layer
public var pooledOutput: MLXArray
/// The full sequence output of the transformer after the final layernorm
public var lastHiddenState: MLXArray
/// A list of hidden states corresponding to the outputs of the transformer layers
public var hiddenStates: [MLXArray]
}
/// The transformer encoder layer from CLIP
class CLIPEncoderLayer: Module {
@ModuleInfo(key: "layer_norm1") var layerNorm1: LayerNorm
@ModuleInfo(key: "layer_norm2") var layerNorm2: LayerNorm
let attention: MultiHeadAttention
@ModuleInfo var linear1: Linear
@ModuleInfo var linear2: Linear
let activation: (MLXArray) -> MLXArray
init(modelDimensions: Int, numHeads: Int, activation: @escaping (MLXArray) -> MLXArray) {
self._layerNorm1.wrappedValue = LayerNorm(dimensions: modelDimensions)
self._layerNorm2.wrappedValue = LayerNorm(dimensions: modelDimensions)
self.attention = MultiHeadAttention(
dimensions: modelDimensions, numHeads: numHeads, bias: true)
self.linear1 = Linear(modelDimensions, 4 * modelDimensions)
self.linear2 = Linear(4 * modelDimensions, modelDimensions)
self.activation = activation
}
func callAsFunction(_ x: MLXArray, attentionMask: MLXArray? = nil) -> MLXArray {
var y = layerNorm1(x)
y = attention(y, keys: y, values: y, mask: attentionMask)
var x = y + x
y = layerNorm2(x)
y = linear1(y)
y = activation(y)
y = linear2(y)
x = y + x
return x
}
}
/// Implements the text encoder transformer from CLIP
class CLIPTextModel: Module {
@ModuleInfo(key: "token_embedding") var tokenEmbedding: Embedding
@ModuleInfo(key: "position_embedding") var positionEmbedding: Embedding
let layers: [CLIPEncoderLayer]
@ModuleInfo(key: "final_layer_norm") var finalLayerNorm: LayerNorm
@ModuleInfo(key: "text_projection") var textProjection: Linear?
init(configuration: CLIPTextModelConfiguration) {
self._tokenEmbedding.wrappedValue = Embedding(
embeddingCount: configuration.vocabularySize, dimensions: configuration.modelDimensions)
self._positionEmbedding.wrappedValue = Embedding(
embeddingCount: configuration.maxLength, dimensions: configuration.modelDimensions)
self.layers = (0 ..< configuration.numLayers)
.map { _ in
CLIPEncoderLayer(
modelDimensions: configuration.modelDimensions,
numHeads: configuration.numHeads,
activation: configuration.hiddenActivation.activation)
}
self._finalLayerNorm.wrappedValue = LayerNorm(dimensions: configuration.modelDimensions)
if let projectionDimensions = configuration.projectionDimensions {
self._textProjection.wrappedValue = Linear(
configuration.modelDimensions, projectionDimensions, bias: false)
} else {
self._textProjection.wrappedValue = nil
}
}
func mask(_ N: Int, _ dType: DType) -> MLXArray {
let indices = MLXArray(0 ..< Int32(N))
var mask = indices[0..., .newAxis] .< indices[.newAxis]
mask = mask.asType(dType) * (dType == .float16 ? -6e4 : -1e9)
return mask
}
func callAsFunction(_ x: MLXArray) -> CLIPOutput {
var x = x
let (_, N) = x.shape2
let eosTokens = x.argMax(axis: -1)
// compute the embeddings
x = tokenEmbedding(x)
x = x + positionEmbedding.weight[..<N]
// compute the features from the transformer
let mask = mask(N, x.dtype)
var hiddenStates = [MLXArray]()
for l in layers {
x = l(x, attentionMask: mask)
hiddenStates.append(x)
}
// apply the final layernorm
x = finalLayerNorm(x)
let lastHiddenState = x
// select the EOS token
var pooledOutput = x[MLXArray(0 ..< x.count), eosTokens]
if let textProjection {
pooledOutput = textProjection(pooledOutput)
}
return CLIPOutput(
pooledOutput: pooledOutput, lastHiddenState: lastHiddenState, hiddenStates: hiddenStates
)
}
}
| null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null |
StableDiffusion
|
Config
|
# Copyright © 2023-2024 Apple Inc.
from dataclasses import dataclass
from typing import Optional, Tuple
@dataclass
class AutoencoderConfig:
in_channels: int = 3
out_channels: int = 3
latent_channels_out: int = 8
latent_channels_in: int = 4
block_out_channels: Tuple[int] = (128, 256, 512, 512)
layers_per_block: int = 2
norm_num_groups: int = 32
scaling_factor: float = 0.18215
@dataclass
class CLIPTextModelConfig:
num_layers: int = 23
model_dims: int = 1024
num_heads: int = 16
max_length: int = 77
vocab_size: int = 49408
projection_dim: Optional[int] = None
hidden_act: str = "quick_gelu"
@dataclass
class UNetConfig:
in_channels: int = 4
out_channels: int = 4
conv_in_kernel: int = 3
conv_out_kernel: int = 3
block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
layers_per_block: Tuple[int] = (2, 2, 2, 2)
mid_block_layers: int = 2
transformer_layers_per_block: Tuple[int] = (1, 1, 1, 1)
num_attention_heads: Tuple[int] = (5, 10, 20, 20)
cross_attention_dim: Tuple[int] = (1024,) * 4
norm_num_groups: int = 32
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
up_block_types: Tuple[str] = (
"UpBlock2D",
"CrossAttnUpBlock2D",
"CrossAttnUpBlock2D",
"CrossAttnUpBlock2D",
)
addition_embed_type: Optional[str] = None
addition_time_embed_dim: Optional[int] = None
projection_class_embeddings_input_dim: Optional[int] = None
@dataclass
class DiffusionConfig:
beta_schedule: str = "scaled_linear"
beta_start: float = 0.00085
beta_end: float = 0.012
num_train_steps: int = 1000
|
// Copyright © 2024 Apple Inc.
import Foundation
import MLX
import MLXNN
// port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/config.py
/// Configuration for ``Autoencoder``
struct AutoencoderConfiguration: Codable {
public var inputChannels = 3
public var outputChannels = 3
public var latentChannelsOut: Int { latentChannelsIn * 2 }
public var latentChannelsIn = 4
public var blockOutChannels = [128, 256, 512, 512]
public var layersPerBlock = 2
public var normNumGroups = 32
public var scalingFactor: Float = 0.18215
enum CodingKeys: String, CodingKey {
case inputChannels = "in_channels"
case outputChannels = "out_channels"
case latentChannelsIn = "latent_channels"
case blockOutChannels = "block_out_channels"
case layersPerBlock = "layers_per_block"
case normNumGroups = "norm_num_groups"
case scalingFactor = "scaling_factor"
}
public init(from decoder: any Decoder) throws {
let container: KeyedDecodingContainer<AutoencoderConfiguration.CodingKeys> =
try decoder.container(keyedBy: AutoencoderConfiguration.CodingKeys.self)
// load_autoencoder()
self.scalingFactor =
try container.decodeIfPresent(Float.self, forKey: .scalingFactor) ?? 0.18215
self.inputChannels = try container.decode(Int.self, forKey: .inputChannels)
self.outputChannels = try container.decode(Int.self, forKey: .outputChannels)
self.latentChannelsIn = try container.decode(Int.self, forKey: .latentChannelsIn)
self.blockOutChannels = try container.decode([Int].self, forKey: .blockOutChannels)
self.layersPerBlock = try container.decode(Int.self, forKey: .layersPerBlock)
self.normNumGroups = try container.decode(Int.self, forKey: .normNumGroups)
}
public func encode(to encoder: any Encoder) throws {
var container: KeyedEncodingContainer<AutoencoderConfiguration.CodingKeys> =
encoder.container(keyedBy: AutoencoderConfiguration.CodingKeys.self)
try container.encode(self.inputChannels, forKey: .inputChannels)
try container.encode(self.outputChannels, forKey: .outputChannels)
try container.encode(self.latentChannelsIn, forKey: .latentChannelsIn)
try container.encode(self.blockOutChannels, forKey: .blockOutChannels)
try container.encode(self.layersPerBlock, forKey: .layersPerBlock)
try container.encode(self.normNumGroups, forKey: .normNumGroups)
try container.encode(self.scalingFactor, forKey: .scalingFactor)
}
}
/// Configuration for ``CLIPTextModel``
struct CLIPTextModelConfiguration: Codable {
public enum ClipActivation: String, Codable {
case fast = "quick_gelu"
case gelu = "gelu"
var activation: (MLXArray) -> MLXArray {
switch self {
case .fast: MLXNN.geluFastApproximate
case .gelu: MLXNN.gelu
}
}
}
public var numLayers = 23
public var modelDimensions = 1024
public var numHeads = 16
public var maxLength = 77
public var vocabularySize = 49408
public var projectionDimensions: Int? = nil
public var hiddenActivation: ClipActivation = .fast
enum CodingKeys: String, CodingKey {
case numLayers = "num_hidden_layers"
case modelDimensions = "hidden_size"
case numHeads = "num_attention_heads"
case maxLength = "max_position_embeddings"
case vocabularySize = "vocab_size"
case projectionDimensions = "projection_dim"
case hiddenActivation = "hidden_act"
case architectures = "architectures"
}
public init(from decoder: any Decoder) throws {
let container: KeyedDecodingContainer<CLIPTextModelConfiguration.CodingKeys> =
try decoder.container(keyedBy: CLIPTextModelConfiguration.CodingKeys.self)
// see load_text_encoder
let architectures = try container.decode([String].self, forKey: .architectures)
let withProjection = architectures[0].contains("WithProjection")
self.projectionDimensions =
withProjection
? try container.decodeIfPresent(Int.self, forKey: .projectionDimensions) : nil
self.hiddenActivation =
try container.decodeIfPresent(
CLIPTextModelConfiguration.ClipActivation.self, forKey: .hiddenActivation) ?? .fast
self.numLayers = try container.decode(Int.self, forKey: .numLayers)
self.modelDimensions = try container.decode(Int.self, forKey: .modelDimensions)
self.numHeads = try container.decode(Int.self, forKey: .numHeads)
self.maxLength = try container.decode(Int.self, forKey: .maxLength)
self.vocabularySize = try container.decode(Int.self, forKey: .vocabularySize)
}
public func encode(to encoder: any Encoder) throws {
var container: KeyedEncodingContainer<CLIPTextModelConfiguration.CodingKeys> =
encoder.container(keyedBy: CLIPTextModelConfiguration.CodingKeys.self)
if projectionDimensions != nil {
try container.encode(["WithProjection"], forKey: .architectures)
} else {
try container.encode(["Other"], forKey: .architectures)
}
try container.encode(self.numLayers, forKey: .numLayers)
try container.encode(self.modelDimensions, forKey: .modelDimensions)
try container.encode(self.numHeads, forKey: .numHeads)
try container.encode(self.maxLength, forKey: .maxLength)
try container.encode(self.vocabularySize, forKey: .vocabularySize)
try container.encodeIfPresent(self.projectionDimensions, forKey: .projectionDimensions)
try container.encode(self.hiddenActivation, forKey: .hiddenActivation)
}
}
/// Configuration for ``UNetModel``
struct UNetConfiguration: Codable {
public var inputChannels = 4
public var outputChannels = 4
public var convolutionInKernel = 3
public var convolutionOutKernel = 3
public var blockOutChannels = [320, 640, 1280, 1280]
public var layersPerBlock = [2, 2, 2, 2]
public var midBlockLayers = 2
public var transformerLayersPerBlock = [2, 2, 2, 2]
public var numHeads = [5, 10, 20, 20]
public var crossAttentionDimension = [1024, 1024, 1024, 1024]
public var normNumGroups = 32
public var downBlockTypes: [String] = []
public var upBlockTypes: [String] = []
public var additionEmbedType: String? = nil
public var additionTimeEmbedDimension: Int? = nil
public var projectionClassEmbeddingsInputDimension: Int? = nil
enum CodingKeys: String, CodingKey {
case inputChannels = "in_channels"
case outputChannels = "out_channels"
case convolutionInKernel = "conv_in_kernel"
case convolutionOutKernel = "conv_out_kernel"
case blockOutChannels = "block_out_channels"
case layersPerBlock = "layers_per_block"
case midBlockLayers = "mid_block_layers"
case transformerLayersPerBlock = "transformer_layers_per_block"
case numHeads = "attention_head_dim"
case crossAttentionDimension = "cross_attention_dim"
case normNumGroups = "norm_num_groups"
case downBlockTypes = "down_block_types"
case upBlockTypes = "up_block_types"
case additionEmbedType = "addition_embed_type"
case additionTimeEmbedDimension = "addition_time_embed_dim"
case projectionClassEmbeddingsInputDimension = "projection_class_embeddings_input_dim"
}
public init() {
}
public init(from decoder: Decoder) throws {
let container: KeyedDecodingContainer<UNetConfiguration.CodingKeys> = try decoder.container(
keyedBy: UNetConfiguration.CodingKeys.self)
// customizations based on def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
//
// Note: the encode() writes out the internal format (and this can load it back in)
self.blockOutChannels = try container.decode([Int].self, forKey: .blockOutChannels)
let nBlocks = blockOutChannels.count
self.layersPerBlock =
try (try? container.decode([Int].self, forKey: .layersPerBlock))
?? Array(repeating: container.decode(Int.self, forKey: .layersPerBlock), count: nBlocks)
self.transformerLayersPerBlock =
(try? container.decode([Int].self, forKey: .transformerLayersPerBlock)) ?? [1, 1, 1, 1]
self.numHeads =
try (try? container.decodeIfPresent([Int].self, forKey: .numHeads))
?? Array(repeating: container.decode(Int.self, forKey: .numHeads), count: nBlocks)
self.crossAttentionDimension =
try (try? container.decode([Int].self, forKey: .crossAttentionDimension))
?? Array(
repeating: container.decode(Int.self, forKey: .crossAttentionDimension),
count: nBlocks)
self.upBlockTypes = try container.decode([String].self, forKey: .upBlockTypes).reversed()
self.convolutionInKernel =
try container.decodeIfPresent(Int.self, forKey: .convolutionInKernel) ?? 3
self.convolutionOutKernel =
try container.decodeIfPresent(Int.self, forKey: .convolutionOutKernel) ?? 3
self.midBlockLayers = try container.decodeIfPresent(Int.self, forKey: .midBlockLayers) ?? 2
self.inputChannels = try container.decode(Int.self, forKey: .inputChannels)
self.outputChannels = try container.decode(Int.self, forKey: .outputChannels)
self.normNumGroups = try container.decode(Int.self, forKey: .normNumGroups)
self.downBlockTypes = try container.decode([String].self, forKey: .downBlockTypes)
self.additionEmbedType = try container.decodeIfPresent(
String.self, forKey: .additionEmbedType)
self.additionTimeEmbedDimension = try container.decodeIfPresent(
Int.self, forKey: .additionTimeEmbedDimension)
self.projectionClassEmbeddingsInputDimension = try container.decodeIfPresent(
Int.self, forKey: .projectionClassEmbeddingsInputDimension)
}
public func encode(to encoder: Encoder) throws {
var container: KeyedEncodingContainer<UNetConfiguration.CodingKeys> = encoder.container(
keyedBy: UNetConfiguration.CodingKeys.self)
try container.encode(self.upBlockTypes.reversed(), forKey: .upBlockTypes)
try container.encode(self.inputChannels, forKey: .inputChannels)
try container.encode(self.outputChannels, forKey: .outputChannels)
try container.encode(self.convolutionInKernel, forKey: .convolutionInKernel)
try container.encode(self.convolutionOutKernel, forKey: .convolutionOutKernel)
try container.encode(self.blockOutChannels, forKey: .blockOutChannels)
try container.encode(self.layersPerBlock, forKey: .layersPerBlock)
try container.encode(self.midBlockLayers, forKey: .midBlockLayers)
try container.encode(self.transformerLayersPerBlock, forKey: .transformerLayersPerBlock)
try container.encode(self.numHeads, forKey: .numHeads)
try container.encode(self.crossAttentionDimension, forKey: .crossAttentionDimension)
try container.encode(self.normNumGroups, forKey: .normNumGroups)
try container.encode(self.downBlockTypes, forKey: .downBlockTypes)
try container.encodeIfPresent(self.additionEmbedType, forKey: .additionEmbedType)
try container.encodeIfPresent(
self.additionTimeEmbedDimension, forKey: .additionTimeEmbedDimension)
try container.encodeIfPresent(
self.projectionClassEmbeddingsInputDimension,
forKey: .projectionClassEmbeddingsInputDimension)
}
}
/// Configuration for ``StableDiffusion``
public struct DiffusionConfiguration: Codable {
public enum BetaSchedule: String, Codable {
case linear = "linear"
case scaledLinear = "scaled_linear"
}
public var betaSchedule = BetaSchedule.scaledLinear
public var betaStart: Float = 0.00085
public var betaEnd: Float = 0.012
    public var trainSteps = 1000
enum CodingKeys: String, CodingKey {
case betaSchedule = "beta_schedule"
case betaStart = "beta_start"
case betaEnd = "beta_end"
case trainSteps = "num_train_timesteps"
}
}
| null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null |
StableDiffusion
|
Load
|
# Copyright © 2023-2024 Apple Inc.
import json
from typing import Optional
import mlx.core as mx
from huggingface_hub import hf_hub_download
from mlx.utils import tree_unflatten
from .clip import CLIPTextModel
from .config import AutoencoderConfig, CLIPTextModelConfig, DiffusionConfig, UNetConfig
from .tokenizer import Tokenizer
from .unet import UNetModel
from .vae import Autoencoder
_DEFAULT_MODEL = "stabilityai/stable-diffusion-2-1-base"
_MODELS = {
# See https://huggingface.co/stabilityai/sdxl-turbo for the model details and license
"stabilityai/sdxl-turbo": {
"unet_config": "unet/config.json",
"unet": "unet/diffusion_pytorch_model.safetensors",
"text_encoder_config": "text_encoder/config.json",
"text_encoder": "text_encoder/model.safetensors",
"text_encoder_2_config": "text_encoder_2/config.json",
"text_encoder_2": "text_encoder_2/model.safetensors",
"vae_config": "vae/config.json",
"vae": "vae/diffusion_pytorch_model.safetensors",
"diffusion_config": "scheduler/scheduler_config.json",
"tokenizer_vocab": "tokenizer/vocab.json",
"tokenizer_merges": "tokenizer/merges.txt",
"tokenizer_2_vocab": "tokenizer_2/vocab.json",
"tokenizer_2_merges": "tokenizer_2/merges.txt",
},
# See https://huggingface.co/stabilityai/stable-diffusion-2-1-base for the model details and license
"stabilityai/stable-diffusion-2-1-base": {
"unet_config": "unet/config.json",
"unet": "unet/diffusion_pytorch_model.safetensors",
"text_encoder_config": "text_encoder/config.json",
"text_encoder": "text_encoder/model.safetensors",
"vae_config": "vae/config.json",
"vae": "vae/diffusion_pytorch_model.safetensors",
"diffusion_config": "scheduler/scheduler_config.json",
"tokenizer_vocab": "tokenizer/vocab.json",
"tokenizer_merges": "tokenizer/merges.txt",
},
}
def map_unet_weights(key, value):
# Map up/downsampling
if "downsamplers" in key:
key = key.replace("downsamplers.0.conv", "downsample")
if "upsamplers" in key:
key = key.replace("upsamplers.0.conv", "upsample")
# Map the mid block
if "mid_block.resnets.0" in key:
key = key.replace("mid_block.resnets.0", "mid_blocks.0")
if "mid_block.attentions.0" in key:
key = key.replace("mid_block.attentions.0", "mid_blocks.1")
if "mid_block.resnets.1" in key:
key = key.replace("mid_block.resnets.1", "mid_blocks.2")
# Map attention layers
if "to_k" in key:
key = key.replace("to_k", "key_proj")
if "to_out.0" in key:
key = key.replace("to_out.0", "out_proj")
if "to_q" in key:
key = key.replace("to_q", "query_proj")
if "to_v" in key:
key = key.replace("to_v", "value_proj")
# Map transformer ffn
if "ff.net.2" in key:
key = key.replace("ff.net.2", "linear3")
if "ff.net.0" in key:
k1 = key.replace("ff.net.0.proj", "linear1")
k2 = key.replace("ff.net.0.proj", "linear2")
v1, v2 = mx.split(value, 2)
return [(k1, v1), (k2, v2)]
if "conv_shortcut.weight" in key:
value = value.squeeze()
# Transform the weights from 1x1 convs to linear
if len(value.shape) == 4 and ("proj_in" in key or "proj_out" in key):
value = value.squeeze()
if len(value.shape) == 4:
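        # PyTorch stores conv weights as (out, in, H, W); MLX expects the
        # channels-last (out, H, W, in) layout. The double reshape forces a
        # contiguous copy after the transpose.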
value = value.transpose(0, 2, 3, 1)
value = value.reshape(-1).reshape(value.shape)
return [(key, value)]
def map_clip_text_encoder_weights(key, value):
# Remove prefixes
if key.startswith("text_model."):
key = key[11:]
if key.startswith("embeddings."):
key = key[11:]
if key.startswith("encoder."):
key = key[8:]
# Map attention layers
if "self_attn." in key:
key = key.replace("self_attn.", "attention.")
if "q_proj." in key:
key = key.replace("q_proj.", "query_proj.")
if "k_proj." in key:
key = key.replace("k_proj.", "key_proj.")
if "v_proj." in key:
key = key.replace("v_proj.", "value_proj.")
# Map ffn layers
if "mlp.fc1" in key:
key = key.replace("mlp.fc1", "linear1")
if "mlp.fc2" in key:
key = key.replace("mlp.fc2", "linear2")
return [(key, value)]
def map_vae_weights(key, value):
# Map up/downsampling
if "downsamplers" in key:
key = key.replace("downsamplers.0.conv", "downsample")
if "upsamplers" in key:
key = key.replace("upsamplers.0.conv", "upsample")
# Map attention layers
if "to_k" in key:
key = key.replace("to_k", "key_proj")
if "to_out.0" in key:
key = key.replace("to_out.0", "out_proj")
if "to_q" in key:
key = key.replace("to_q", "query_proj")
if "to_v" in key:
key = key.replace("to_v", "value_proj")
# Map the mid block
if "mid_block.resnets.0" in key:
key = key.replace("mid_block.resnets.0", "mid_blocks.0")
if "mid_block.attentions.0" in key:
key = key.replace("mid_block.attentions.0", "mid_blocks.1")
if "mid_block.resnets.1" in key:
key = key.replace("mid_block.resnets.1", "mid_blocks.2")
# Map the quant/post_quant layers
if "quant_conv" in key:
key = key.replace("quant_conv", "quant_proj")
value = value.squeeze()
# Map the conv_shortcut to linear
if "conv_shortcut.weight" in key:
value = value.squeeze()
if len(value.shape) == 4:
value = value.transpose(0, 2, 3, 1)
value = value.reshape(-1).reshape(value.shape)
return [(key, value)]
def _flatten(params):
return [(k, v) for p in params for (k, v) in p]
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
dtype = mx.float16 if float16 else mx.float32
weights = mx.load(weight_file)
weights = _flatten([mapper(k, v.astype(dtype)) for k, v in weights.items()])
model.update(tree_unflatten(weights))
def _check_key(key: str, part: str):
if key not in _MODELS:
raise ValueError(
f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
)
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion UNet from Hugging Face Hub."""
_check_key(key, "load_unet")
# Download the config and create the model
unet_config = _MODELS[key]["unet_config"]
with open(hf_hub_download(key, unet_config)) as f:
config = json.load(f)
n_blocks = len(config["block_out_channels"])
model = UNetModel(
UNetConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=[config["layers_per_block"]] * n_blocks,
transformer_layers_per_block=config.get(
"transformer_layers_per_block", (1,) * 4
),
num_attention_heads=(
[config["attention_head_dim"]] * n_blocks
if isinstance(config["attention_head_dim"], int)
else config["attention_head_dim"]
),
cross_attention_dim=[config["cross_attention_dim"]] * n_blocks,
norm_num_groups=config["norm_num_groups"],
down_block_types=config["down_block_types"],
up_block_types=config["up_block_types"][::-1],
addition_embed_type=config.get("addition_embed_type", None),
addition_time_embed_dim=config.get("addition_time_embed_dim", None),
projection_class_embeddings_input_dim=config.get(
"projection_class_embeddings_input_dim", None
),
)
)
# Download the weights and map them into the model
unet_weights = _MODELS[key]["unet"]
weight_file = hf_hub_download(key, unet_weights)
_load_safetensor_weights(map_unet_weights, model, weight_file, float16)
return model
def load_text_encoder(
key: str = _DEFAULT_MODEL,
float16: bool = False,
model_key: str = "text_encoder",
config_key: Optional[str] = None,
):
"""Load the stable diffusion text encoder from Hugging Face Hub."""
_check_key(key, "load_text_encoder")
config_key = config_key or (model_key + "_config")
# Download the config and create the model
text_encoder_config = _MODELS[key][config_key]
with open(hf_hub_download(key, text_encoder_config)) as f:
config = json.load(f)
with_projection = "WithProjection" in config["architectures"][0]
model = CLIPTextModel(
CLIPTextModelConfig(
num_layers=config["num_hidden_layers"],
model_dims=config["hidden_size"],
num_heads=config["num_attention_heads"],
max_length=config["max_position_embeddings"],
vocab_size=config["vocab_size"],
projection_dim=config["projection_dim"] if with_projection else None,
hidden_act=config.get("hidden_act", "quick_gelu"),
)
)
# Download the weights and map them into the model
text_encoder_weights = _MODELS[key][model_key]
weight_file = hf_hub_download(key, text_encoder_weights)
_load_safetensor_weights(map_clip_text_encoder_weights, model, weight_file, float16)
return model
def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion autoencoder from Hugging Face Hub."""
_check_key(key, "load_autoencoder")
# Download the config and create the model
vae_config = _MODELS[key]["vae_config"]
with open(hf_hub_download(key, vae_config)) as f:
config = json.load(f)
model = Autoencoder(
AutoencoderConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
latent_channels_out=2 * config["latent_channels"],
latent_channels_in=config["latent_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=config["layers_per_block"],
norm_num_groups=config["norm_num_groups"],
scaling_factor=config.get("scaling_factor", 0.18215),
)
)
# Download the weights and map them into the model
vae_weights = _MODELS[key]["vae"]
weight_file = hf_hub_download(key, vae_weights)
_load_safetensor_weights(map_vae_weights, model, weight_file, float16)
return model
def load_diffusion_config(key: str = _DEFAULT_MODEL):
"""Load the stable diffusion config from Hugging Face Hub."""
_check_key(key, "load_diffusion_config")
diffusion_config = _MODELS[key]["diffusion_config"]
with open(hf_hub_download(key, diffusion_config)) as f:
config = json.load(f)
return DiffusionConfig(
beta_start=config["beta_start"],
beta_end=config["beta_end"],
beta_schedule=config["beta_schedule"],
num_train_steps=config["num_train_timesteps"],
)
def load_tokenizer(
key: str = _DEFAULT_MODEL,
vocab_key: str = "tokenizer_vocab",
merges_key: str = "tokenizer_merges",
):
_check_key(key, "load_tokenizer")
vocab_file = hf_hub_download(key, _MODELS[key][vocab_key])
with open(vocab_file, encoding="utf-8") as f:
vocab = json.load(f)
merges_file = hf_hub_download(key, _MODELS[key][merges_key])
with open(merges_file, encoding="utf-8") as f:
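        # The first line is a version header; the slice keeps exactly the
        # 49152 - 256 - 2 merge rules that build CLIP's BPE vocabulary.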
bpe_merges = f.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1]
bpe_merges = [tuple(m.split()) for m in bpe_merges]
bpe_ranks = dict(map(reversed, enumerate(bpe_merges)))
return Tokenizer(bpe_ranks, vocab)
|
// Copyright © 2024 Apple Inc.
import Foundation
import Hub
import MLX
import MLXNN
// port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/model_io.py
/// Configuration for loading stable diffusion weights.
///
/// These options can be tuned to conserve memory.
public struct LoadConfiguration: Sendable {
/// convert weights to float16
public var float16 = true
/// quantize weights
public var quantize = false
public var dType: DType {
float16 ? .float16 : .float32
}
public init(float16: Bool = true, quantize: Bool = false) {
self.float16 = float16
self.quantize = quantize
}
}
/// Parameters for evaluating a stable diffusion prompt and generating latents
public struct EvaluateParameters: Sendable {
/// `cfg` value from the preset
public var cfgWeight: Float
/// number of steps -- default is from the preset
public var steps: Int
/// number of images to generate at a time
public var imageCount = 1
public var decodingBatchSize = 1
/// size of the latent tensor -- the result image is a factor of 8 larger than this
public var latentSize = [64, 64]
public var seed: UInt64
public var prompt = ""
public var negativePrompt = ""
public init(
cfgWeight: Float, steps: Int, imageCount: Int = 1, decodingBatchSize: Int = 1,
latentSize: [Int] = [64, 64], seed: UInt64? = nil, prompt: String = "",
negativePrompt: String = ""
) {
self.cfgWeight = cfgWeight
self.steps = steps
self.imageCount = imageCount
self.decodingBatchSize = decodingBatchSize
self.latentSize = latentSize
self.seed = seed ?? UInt64(Date.timeIntervalSinceReferenceDate * 1000)
self.prompt = prompt
self.negativePrompt = negativePrompt
}
}
/// File types for ``StableDiffusionConfiguration/files``. Used by the presets to provide
/// relative file paths for different types of files.
enum FileKey {
case unetConfig
case unetWeights
case textEncoderConfig
case textEncoderWeights
case textEncoderConfig2
case textEncoderWeights2
case vaeConfig
case vaeWeights
case diffusionConfig
case tokenizerVocabulary
case tokenizerMerges
case tokenizerVocabulary2
case tokenizerMerges2
}
/// Stable diffusion configuration -- this selects the model to load.
///
/// Use the preset values:
/// - ``presetSDXLTurbo``
/// - ``presetStableDiffusion21Base``
///
/// or use the enum (convenient for command line tools):
///
/// - ``Preset/base``
/// - ``Preset/sdxlTurbo``
///
/// Call ``download(hub:progressHandler:)`` to download the weights, then
/// ``textToImageGenerator(hub:configuration:)`` or
/// ``imageToImageGenerator(hub:configuration:)`` to produce the ``ImageGenerator``.
///
/// The ``ImageGenerator`` has a method to generate the latents:
/// - ``TextToImageGenerator/generateLatents(parameters:)``
/// - ``ImageToImageGenerator/generateLatents(image:parameters:strength:)``
///
/// Evaluate each of the latents from that iterator and use the decoder to turn the last latent
/// into an image:
///
/// - ``ImageGenerator/decode(xt:)``
///
/// Finally use ``Image`` to save it to a file or convert to a CGImage for display.
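///
/// A minimal usage sketch following the flow above (assuming the default `HubApi`;
/// the iterator handling and error handling are simplified):
///
/// ```swift
/// let sdConfiguration = StableDiffusionConfiguration.presetStableDiffusion21Base
/// try await sdConfiguration.download()
/// if let generator = try sdConfiguration.textToImageGenerator(
///     configuration: LoadConfiguration(float16: true))
/// {
///     var parameters = sdConfiguration.defaultParameters()
///     parameters.prompt = "a photo of an astronaut riding a horse on mars"
///     var latent: MLXArray?
///     for xt in generator.generateLatents(parameters: parameters) {
///         eval(xt)
///         latent = xt
///     }
///     // Decode the final latent, then use ``Image`` to save or display it.
///     let raster = generator.decode(xt: latent!)
/// }
/// ```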
public struct StableDiffusionConfiguration: Sendable {
public let id: String
let files: [FileKey: String]
public let defaultParameters: @Sendable () -> EvaluateParameters
let factory:
@Sendable (HubApi, StableDiffusionConfiguration, LoadConfiguration) throws ->
StableDiffusion
public func download(
hub: HubApi = HubApi(), progressHandler: @escaping (Progress) -> Void = { _ in }
) async throws {
let repo = Hub.Repo(id: self.id)
try await hub.snapshot(
from: repo, matching: Array(files.values), progressHandler: progressHandler)
}
public func textToImageGenerator(hub: HubApi = HubApi(), configuration: LoadConfiguration)
throws -> TextToImageGenerator?
{
try factory(hub, self, configuration) as? TextToImageGenerator
}
public func imageToImageGenerator(hub: HubApi = HubApi(), configuration: LoadConfiguration)
throws -> ImageToImageGenerator?
{
try factory(hub, self, configuration) as? ImageToImageGenerator
}
public enum Preset: String, Codable, CaseIterable, Sendable {
case base
case sdxlTurbo = "sdxl-turbo"
public var configuration: StableDiffusionConfiguration {
switch self {
case .base: presetStableDiffusion21Base
case .sdxlTurbo: presetSDXLTurbo
}
}
}
/// See https://huggingface.co/stabilityai/sdxl-turbo for the model details and license
public static let presetSDXLTurbo = StableDiffusionConfiguration(
id: "stabilityai/sdxl-turbo",
files: [
.unetConfig: "unet/config.json",
.unetWeights: "unet/diffusion_pytorch_model.safetensors",
.textEncoderConfig: "text_encoder/config.json",
.textEncoderWeights: "text_encoder/model.safetensors",
.textEncoderConfig2: "text_encoder_2/config.json",
.textEncoderWeights2: "text_encoder_2/model.safetensors",
.vaeConfig: "vae/config.json",
.vaeWeights: "vae/diffusion_pytorch_model.safetensors",
.diffusionConfig: "scheduler/scheduler_config.json",
.tokenizerVocabulary: "tokenizer/vocab.json",
.tokenizerMerges: "tokenizer/merges.txt",
.tokenizerVocabulary2: "tokenizer_2/vocab.json",
.tokenizerMerges2: "tokenizer_2/merges.txt",
],
defaultParameters: { EvaluateParameters(cfgWeight: 0, steps: 2) },
factory: { hub, sdConfiguration, loadConfiguration in
let sd = try StableDiffusionXL(
hub: hub, configuration: sdConfiguration, dType: loadConfiguration.dType)
if loadConfiguration.quantize {
quantize(model: sd.textEncoder, filter: { k, m in m is Linear })
quantize(model: sd.textEncoder2, filter: { k, m in m is Linear })
quantize(model: sd.unet, groupSize: 32, bits: 8)
}
return sd
}
)
/// See https://huggingface.co/stabilityai/stable-diffusion-2-1-base for the model details and license
public static let presetStableDiffusion21Base = StableDiffusionConfiguration(
id: "stabilityai/stable-diffusion-2-1-base",
files: [
.unetConfig: "unet/config.json",
.unetWeights: "unet/diffusion_pytorch_model.safetensors",
.textEncoderConfig: "text_encoder/config.json",
.textEncoderWeights: "text_encoder/model.safetensors",
.vaeConfig: "vae/config.json",
.vaeWeights: "vae/diffusion_pytorch_model.safetensors",
.diffusionConfig: "scheduler/scheduler_config.json",
.tokenizerVocabulary: "tokenizer/vocab.json",
.tokenizerMerges: "tokenizer/merges.txt",
],
defaultParameters: { EvaluateParameters(cfgWeight: 7.5, steps: 50) },
factory: { hub, sdConfiguration, loadConfiguration in
let sd = try StableDiffusionBase(
hub: hub, configuration: sdConfiguration, dType: loadConfiguration.dType)
if loadConfiguration.quantize {
quantize(model: sd.textEncoder, filter: { k, m in m is Linear })
quantize(model: sd.unet, groupSize: 32, bits: 8)
}
return sd
}
)
}
// MARK: - Key Mapping
func keyReplace(_ replace: String, _ with: String) -> @Sendable (String) -> String? {
return { [replace, with] key in
if key.contains(replace) {
return key.replacingOccurrences(of: replace, with: with)
}
return nil
}
}
func dropPrefix(_ prefix: String) -> @Sendable (String) -> String? {
return { [prefix] key in
if key.hasPrefix(prefix) {
return String(key.dropFirst(prefix.count))
}
return nil
}
}
// see map_unet_weights()
let unetRules: [@Sendable (String) -> String?] = [
// Map up/downsampling
keyReplace("downsamplers.0.conv", "downsample"),
keyReplace("upsamplers.0.conv", "upsample"),
// Map the mid block
keyReplace("mid_block.resnets.0", "mid_blocks.0"),
keyReplace("mid_block.attentions.0", "mid_blocks.1"),
keyReplace("mid_block.resnets.1", "mid_blocks.2"),
// Map attention layers
keyReplace("to_k", "key_proj"),
keyReplace("to_out.0", "out_proj"),
keyReplace("to_q", "query_proj"),
keyReplace("to_v", "value_proj"),
// Map transformer ffn
keyReplace("ff.net.2", "linear3"),
]
func unetRemap(key: String, value: MLXArray) -> [(String, MLXArray)] {
var key = key
var value = value
for rule in unetRules {
key = rule(key) ?? key
}
// Map transformer ffn
if key.contains("ff.net.0") {
let k1 = key.replacingOccurrences(of: "ff.net.0.proj", with: "linear1")
let k2 = key.replacingOccurrences(of: "ff.net.0.proj", with: "linear2")
let (v1, v2) = value.split()
return [(k1, v1), (k2, v2)]
}
if key.contains("conv_shortcut.weight") {
value = value.squeezed()
}
// Transform the weights from 1x1 convs to linear
if value.ndim == 4 && (key.contains("proj_in") || key.contains("proj_out")) {
value = value.squeezed()
}
if value.ndim == 4 {
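        // Convert PyTorch (out, in, H, W) convolution weights to the
        // channels-last (out, H, W, in) layout expected by MLX.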
value = value.transposed(0, 2, 3, 1)
value = value.reshaped(-1).reshaped(value.shape)
}
return [(key, value)]
}
let clipRules: [@Sendable (String) -> String?] = [
dropPrefix("text_model."),
dropPrefix("embeddings."),
dropPrefix("encoder."),
// Map attention layers
keyReplace("self_attn.", "attention."),
keyReplace("q_proj.", "query_proj."),
keyReplace("k_proj.", "key_proj."),
keyReplace("v_proj.", "value_proj."),
// Map ffn layers
keyReplace("mlp.fc1", "linear1"),
keyReplace("mlp.fc2", "linear2"),
]
func clipRemap(key: String, value: MLXArray) -> [(String, MLXArray)] {
var key = key
for rule in clipRules {
key = rule(key) ?? key
}
// not used
if key == "position_ids" {
return []
}
return [(key, value)]
}
let vaeRules: [@Sendable (String) -> String?] = [
// Map up/downsampling
keyReplace("downsamplers.0.conv", "downsample"),
keyReplace("upsamplers.0.conv", "upsample"),
// Map attention layers
keyReplace("to_k", "key_proj"),
keyReplace("to_out.0", "out_proj"),
keyReplace("to_q", "query_proj"),
keyReplace("to_v", "value_proj"),
// Map the mid block
keyReplace("mid_block.resnets.0", "mid_blocks.0"),
keyReplace("mid_block.attentions.0", "mid_blocks.1"),
keyReplace("mid_block.resnets.1", "mid_blocks.2"),
keyReplace("mid_blocks.1.key.", "mid_blocks.1.key_proj."),
keyReplace("mid_blocks.1.query.", "mid_blocks.1.query_proj."),
keyReplace("mid_blocks.1.value.", "mid_blocks.1.value_proj."),
keyReplace("mid_blocks.1.proj_attn.", "mid_blocks.1.out_proj."),
]
func vaeRemap(key: String, value: MLXArray) -> [(String, MLXArray)] {
var key = key
var value = value
for rule in vaeRules {
key = rule(key) ?? key
}
// Map the quant/post_quant layers
if key.contains("quant_conv") {
key = key.replacingOccurrences(of: "quant_conv", with: "quant_proj")
value = value.squeezed()
}
// Map the conv_shortcut to linear
if key.contains("conv_shortcut.weight") {
value = value.squeezed()
}
if value.ndim == 4 {
value = value.transposed(0, 2, 3, 1)
value = value.reshaped(-1).reshaped(value.shape)
}
return [(key, value)]
}
func loadWeights(
url: URL, model: Module, mapper: (String, MLXArray) -> [(String, MLXArray)], dType: DType
) throws {
let weights = try loadArrays(url: url).flatMap { mapper($0.key, $0.value.asType(dType)) }
// Note: not using verifier because some shapes change upon load
try model.update(parameters: ModuleParameters.unflattened(weights), verify: .none)
}
// MARK: - Loading
func resolve(hub: HubApi, configuration: StableDiffusionConfiguration, key: FileKey) -> URL {
precondition(
configuration.files[key] != nil, "configuration \(configuration.id) missing key: \(key)")
let repo = Hub.Repo(id: configuration.id)
let directory = hub.localRepoLocation(repo)
return directory.appending(component: configuration.files[key]!)
}
func loadConfiguration<T: Decodable>(
hub: HubApi, configuration: StableDiffusionConfiguration, key: FileKey, type: T.Type
) throws -> T {
let url = resolve(hub: hub, configuration: configuration, key: key)
return try JSONDecoder().decode(T.self, from: Data(contentsOf: url))
}
func loadUnet(hub: HubApi, configuration: StableDiffusionConfiguration, dType: DType) throws
-> UNetModel
{
let unetConfiguration = try loadConfiguration(
hub: hub, configuration: configuration, key: .unetConfig, type: UNetConfiguration.self)
let model = UNetModel(configuration: unetConfiguration)
let weightsURL = resolve(hub: hub, configuration: configuration, key: .unetWeights)
try loadWeights(url: weightsURL, model: model, mapper: unetRemap, dType: dType)
return model
}
func loadTextEncoder(
hub: HubApi, configuration: StableDiffusionConfiguration,
configKey: FileKey = .textEncoderConfig, weightsKey: FileKey = .textEncoderWeights, dType: DType
) throws -> CLIPTextModel {
let clipConfiguration = try loadConfiguration(
hub: hub, configuration: configuration, key: configKey,
type: CLIPTextModelConfiguration.self)
let model = CLIPTextModel(configuration: clipConfiguration)
let weightsURL = resolve(hub: hub, configuration: configuration, key: weightsKey)
try loadWeights(url: weightsURL, model: model, mapper: clipRemap, dType: dType)
return model
}
func loadAutoEncoder(hub: HubApi, configuration: StableDiffusionConfiguration, dType: DType) throws
-> Autoencoder
{
let autoEncoderConfiguration = try loadConfiguration(
hub: hub, configuration: configuration, key: .vaeConfig, type: AutoencoderConfiguration.self
)
let model = Autoencoder(configuration: autoEncoderConfiguration)
let weightsURL = resolve(hub: hub, configuration: configuration, key: .vaeWeights)
try loadWeights(url: weightsURL, model: model, mapper: vaeRemap, dType: dType)
return model
}
func loadDiffusionConfiguration(hub: HubApi, configuration: StableDiffusionConfiguration) throws
-> DiffusionConfiguration
{
try loadConfiguration(
hub: hub, configuration: configuration, key: .diffusionConfig,
type: DiffusionConfiguration.self)
}
// MARK: - Tokenizer
func loadTokenizer(
hub: HubApi, configuration: StableDiffusionConfiguration,
vocabulary: FileKey = .tokenizerVocabulary, merges: FileKey = .tokenizerMerges
) throws -> CLIPTokenizer {
let vocabularyURL = resolve(hub: hub, configuration: configuration, key: vocabulary)
let mergesURL = resolve(hub: hub, configuration: configuration, key: merges)
let vocabulary = try JSONDecoder().decode(
[String: Int].self, from: Data(contentsOf: vocabularyURL))
let merges = try String(contentsOf: mergesURL)
.components(separatedBy: .newlines)
// first line is a comment
.dropFirst()
.filter { !$0.isEmpty }
return CLIPTokenizer(merges: merges, vocabulary: vocabulary)
}
| null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null | null |
StableDiffusion
|
Sampler
|
# Copyright © 2023 Apple Inc.
import mlx.core as mx
from .config import DiffusionConfig
def _linspace(a, b, num):
x = mx.arange(0, num) / (num - 1)
return (b - a) * x + a
def _interp(y, x_new):
"""Interpolate the function defined by (arange(0, len(y)), y) at positions x_new."""
x_low = x_new.astype(mx.int32)
x_high = mx.minimum(x_low + 1, len(y) - 1)
y_low = y[x_low]
y_high = y[x_high]
delta_x = x_new - x_low
y_new = y_low * (1 - delta_x) + delta_x * y_high
return y_new
class SimpleEulerSampler:
"""A simple Euler integrator that can be used to sample from our diffusion models.
The method ``step()`` performs one Euler step from x_t to x_t_prev.
"""
def __init__(self, config: DiffusionConfig):
# Compute the noise schedule
if config.beta_schedule == "linear":
betas = _linspace(
config.beta_start, config.beta_end, config.num_train_steps
)
elif config.beta_schedule == "scaled_linear":
betas = _linspace(
config.beta_start**0.5, config.beta_end**0.5, config.num_train_steps
).square()
else:
raise NotImplementedError(f"{config.beta_schedule} is not implemented.")
alphas = 1 - betas
alphas_cumprod = mx.cumprod(alphas)
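        # Express the DDPM schedule in sigma space, sigma_t = sqrt((1 - a_t) / a_t)
        # with a_t the cumulative product of alphas, prepending sigma = 0 so that
        # t = 0 corresponds to a clean image.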
self._sigmas = mx.concatenate(
[mx.zeros(1), ((1 - alphas_cumprod) / alphas_cumprod).sqrt()]
)
@property
def max_time(self):
return len(self._sigmas) - 1
def sample_prior(self, shape, dtype=mx.float32, key=None):
noise = mx.random.normal(shape, key=key)
return (
noise * self._sigmas[-1] * (self._sigmas[-1].square() + 1).rsqrt()
).astype(dtype)
def add_noise(self, x, t, key=None):
noise = mx.random.normal(x.shape, key=key)
s = self.sigmas(t)
return (x + noise * s) * (s.square() + 1).rsqrt()
def sigmas(self, t):
return _interp(self._sigmas, t)
def timesteps(self, num_steps: int, start_time=None, dtype=mx.float32):
start_time = start_time or (len(self._sigmas) - 1)
assert 0 < start_time <= (len(self._sigmas) - 1)
steps = _linspace(start_time, 0, num_steps + 1).astype(dtype)
return list(zip(steps, steps[1:]))
def step(self, eps_pred, x_t, t, t_prev):
sigma = self.sigmas(t).astype(eps_pred.dtype)
sigma_prev = self.sigmas(t_prev).astype(eps_pred.dtype)
dt = sigma_prev - sigma
x_t_prev = (sigma.square() + 1).sqrt() * x_t + eps_pred * dt
x_t_prev = x_t_prev * (sigma_prev.square() + 1).rsqrt()
return x_t_prev
class SimpleEulerAncestralSampler(SimpleEulerSampler):
def step(self, eps_pred, x_t, t, t_prev):
sigma = self.sigmas(t).astype(eps_pred.dtype)
sigma_prev = self.sigmas(t_prev).astype(eps_pred.dtype)
sigma2 = sigma.square()
sigma_prev2 = sigma_prev.square()
sigma_up = (sigma_prev2 * (sigma2 - sigma_prev2) / sigma2).sqrt()
sigma_down = (sigma_prev2 - sigma_up**2).sqrt()
dt = sigma_down - sigma
x_t_prev = (sigma2 + 1).sqrt() * x_t + eps_pred * dt
noise = mx.random.normal(x_t_prev.shape).astype(x_t_prev.dtype)
x_t_prev = x_t_prev + noise * sigma_up
x_t_prev = x_t_prev * (sigma_prev2 + 1).rsqrt()
return x_t_prev
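Both samplers operate on a scaled latent: add_noise stores (x + sigma * noise) / sqrt(sigma^2 + 1), and step first multiplies by sqrt(sigma^2 + 1) to undo that scaling, takes a plain Euler step dy = eps * d(sigma), then renormalizes with the new sigma. The following standalone sketch (illustrative sigma values and shapes, not part of the library) spells out that decomposition:

import mlx.core as mx

sigma, sigma_prev = mx.array(14.6), mx.array(10.0)  # illustrative sigma values
x_t = mx.random.normal((4,))   # scaled latent at time t
eps = mx.random.normal((4,))   # model's noise prediction

# step() as implemented above
x_prev = ((sigma.square() + 1).sqrt() * x_t + eps * (sigma_prev - sigma)) * (
    sigma_prev.square() + 1
).rsqrt()

# the same update phrased as: unscale -> Euler step in sigma -> rescale
y_t = (sigma.square() + 1).sqrt() * x_t
y_prev = y_t + eps * (sigma_prev - sigma)
assert mx.allclose(x_prev, y_prev * (sigma_prev.square() + 1).rsqrt()).item()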
|
// Copyright © 2024 Apple Inc.
import Foundation
import MLX
import MLXRandom
// port of https://github.com/ml-explore/mlx-examples/blob/main/stable_diffusion/stable_diffusion/sampler.py
/// Interpolate the function defined by `(0 ..< y.count, y)` at positions `xNew`.
func interpolate(y: MLXArray, xNew: MLXArray) -> MLXArray {
let xLow = xNew.asType(.int32)
let xHigh = minimum(xLow + 1, y.count - 1)
let yLow = y[xLow]
let yHigh = y[xHigh]
let deltaX = xNew - xLow
let yNew = yLow * (1 - deltaX) + deltaX * yHigh
return yNew
}
/// A simple Euler integrator that can be used to sample from our diffusion models.
///
/// The method ``step()`` performs one Euler step from `x_t` to `x_t_prev`.
class SimpleEulerSampler {
let sigmas: MLXArray
public init(configuration: DiffusionConfiguration) {
let betas: MLXArray
// compute the noise schedule
switch configuration.betaSchedule {
case .linear:
betas = MLXArray.linspace(
configuration.betaStart, configuration.betaEnd, count: configuration.trainSteps)
case .scaledLinear:
betas = MLXArray.linspace(
sqrt(configuration.betaStart), sqrt(configuration.betaEnd),
count: configuration.trainSteps
).square()
}
let alphas = 1 - betas
let alphasCumprod = cumprod(alphas)
self.sigmas = concatenated([
MLXArray.zeros([1]), ((1 - alphasCumprod) / alphasCumprod).sqrt(),
])
}
public var maxTime: Int {
sigmas.count - 1
}
public func samplePrior(shape: [Int], dType: DType = .float32, key: MLXArray? = nil) -> MLXArray
{
let noise = MLXRandom.normal(shape, key: key)
return (noise * sigmas[-1] * (sigmas[-1].square() + 1).rsqrt()).asType(dType)
}
public func addNoise(x: MLXArray, t: MLXArray, key: MLXArray? = nil) -> MLXArray {
let noise = MLXRandom.normal(x.shape, key: key)
let s = sigmas(t)
return (x + noise * s) * (s.square() + 1).rsqrt()
}
public func sigmas(_ t: MLXArray) -> MLXArray {
interpolate(y: sigmas, xNew: t)
}
public func timeSteps(steps: Int, start: Int? = nil, dType: DType = .float32) -> [(
MLXArray, MLXArray
)] {
let start = start ?? (sigmas.count - 1)
precondition(0 < start)
precondition(start <= sigmas.count - 1)
let steps = MLX.linspace(start, 0, count: steps + 1).asType(dType)
return Array(zip(steps, steps[1...]))
}
open func step(epsPred: MLXArray, xt: MLXArray, t: MLXArray, tPrev: MLXArray) -> MLXArray {
let dtype = epsPred.dtype
let sigma = sigmas(t).asType(dtype)
let sigmaPrev = sigmas(tPrev).asType(dtype)
let dt = sigmaPrev - sigma
var xtPrev = (sigma.square() + 1).sqrt() * xt + epsPred * dt
xtPrev = xtPrev * (sigmaPrev.square() + 1).rsqrt()
return xtPrev
}
}
class SimpleEulerAncestralSampler: SimpleEulerSampler {
open override func step(epsPred: MLXArray, xt: MLXArray, t: MLXArray, tPrev: MLXArray)
-> MLXArray
{
let dtype = epsPred.dtype
let sigma = sigmas(t).asType(dtype)
let sigmaPrev = sigmas(tPrev).asType(dtype)
let sigma2 = sigma.square()
let sigmaPrev2 = sigmaPrev.square()
let sigmaUp = (sigmaPrev2 * (sigma2 - sigmaPrev2) / sigma2).sqrt()
let sigmaDown = (sigmaPrev2 - sigmaUp ** 2).sqrt()
let dt = sigmaDown - sigma
var xtPrev = (sigma2 + 1).sqrt() * xt + epsPred * dt
let noise = MLXRandom.normal(xtPrev.shape).asType(xtPrev.dtype)
xtPrev = xtPrev + noise * sigmaUp
xtPrev = xtPrev * (sigmaPrev2 + 1).rsqrt()
return xtPrev
}
}
|
LM
|
Bitnet
|
# Copyright © 2023-2024 Apple Inc.
from dataclasses import dataclass
from functools import partial
from typing import Any, Dict, Optional, Union
import mlx.core as mx
import mlx.nn as nn
from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention
from .bitlinear_layers import BitLinear
from .rope_utils import initialize_rope
@dataclass
class ModelArgs(BaseModelArgs):
model_type: str
hidden_size: int
num_hidden_layers: int
intermediate_size: int
num_attention_heads: int
num_key_value_heads: int
rms_norm_eps: float
vocab_size: int
head_dim: Optional[int] = None
max_position_embeddings: Optional[int] = None
attention_bias: bool = False
mlp_bias: bool = False
rope_theta: float = 10000
rope_traditional: bool = False
rope_scaling: Optional[Dict[str, Union[float, str]]] = None
tie_word_embeddings: bool = True
class Attention(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
dim = args.hidden_size
self.n_heads = n_heads = args.num_attention_heads
self.n_kv_heads = n_kv_heads = args.num_key_value_heads
self.head_dim = head_dim = args.head_dim or args.hidden_size // n_heads
self.scale = head_dim**-0.5
attention_bias = args.attention_bias
self.q_proj = BitLinear(dim, n_heads * head_dim, bias=attention_bias)
self.k_proj = BitLinear(dim, n_kv_heads * head_dim, bias=attention_bias)
self.v_proj = BitLinear(dim, n_kv_heads * head_dim, bias=attention_bias)
self.o_proj = BitLinear(n_heads * head_dim, dim, bias=attention_bias)
self.rope = initialize_rope(
self.head_dim,
args.rope_theta,
args.rope_traditional,
args.rope_scaling,
args.max_position_embeddings,
)
self.attn_sub_norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps)
def __call__(
self,
x: mx.array,
mask: Optional[mx.array] = None,
cache: Optional[Any] = None,
) -> mx.array:
B, L, D = x.shape
queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x)
# Prepare the queries, keys and values for the attention computation
queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3)
keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3)
values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3)
if cache is not None:
queries = self.rope(queries, offset=cache.offset)
keys = self.rope(keys, offset=cache.offset)
keys, values = cache.update_and_fetch(keys, values)
else:
queries = self.rope(queries)
keys = self.rope(keys)
output = scaled_dot_product_attention(
queries, keys, values, cache=cache, scale=self.scale, mask=mask
)
output = output.transpose(0, 2, 1, 3).reshape(B, L, -1)
output = self.attn_sub_norm(output)
output = self.o_proj(output)
return output
@partial(mx.compile, shapeless=True)
def relu2(x):
return mx.square(nn.relu(x))
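# relu2 is the squared-ReLU activation used by the BitNet MLP below; e.g.
# relu2(mx.array([-1.0, 2.0])) evaluates to array([0.0, 4.0]).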
class MLP(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
dim = args.hidden_size
hidden_dim = args.intermediate_size
if hasattr(args, "mlp_bias"):
mlp_bias = args.mlp_bias
else:
mlp_bias = False
self.gate_proj = BitLinear(dim, hidden_dim, bias=mlp_bias)
self.down_proj = BitLinear(hidden_dim, dim, bias=mlp_bias)
self.up_proj = BitLinear(dim, hidden_dim, bias=mlp_bias)
self.ffn_sub_norm = nn.RMSNorm(args.intermediate_size, eps=args.rms_norm_eps)
def __call__(self, x) -> mx.array:
x = relu2(self.gate_proj(x)) * self.up_proj(x)
x = self.ffn_sub_norm(x)
x = self.down_proj(x)
return x
class TransformerBlock(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
self.num_attention_heads = args.num_attention_heads
self.hidden_size = args.hidden_size
self.self_attn = Attention(args)
self.mlp = MLP(args)
self.input_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps)
self.post_attention_layernorm = nn.RMSNorm(
args.hidden_size, eps=args.rms_norm_eps
)
def __call__(
self,
x: mx.array,
mask: Optional[mx.array] = None,
cache: Optional[Any] = None,
) -> mx.array:
r = self.self_attn(self.input_layernorm(x), mask, cache)
h = x + r
r = self.mlp(self.post_attention_layernorm(h))
out = h + r
return out
class LlamaModel(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
self.args = args
self.vocab_size = args.vocab_size
self.num_hidden_layers = args.num_hidden_layers
self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size)
self.layers = [
TransformerBlock(args=args) for _ in range(args.num_hidden_layers)
]
self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps)
def __call__(
self,
inputs: mx.array,
mask: mx.array = None,
cache=None,
):
h = self.embed_tokens(inputs)
if mask is None:
mask = create_attention_mask(h, cache)
if cache is None:
cache = [None] * len(self.layers)
for layer, c in zip(self.layers, cache):
h = layer(h, mask, cache=c)
return self.norm(h)
class Model(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
self.args = args
self.model_type = args.model_type
self.model = LlamaModel(args)
if not args.tie_word_embeddings:
self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False)
def __call__(
self,
inputs: mx.array,
mask: mx.array = None,
cache=None,
):
out = self.model(inputs, mask, cache)
if self.args.tie_word_embeddings:
out = self.model.embed_tokens.as_linear(out)
else:
out = self.lm_head(out)
return out
def sanitize(self, weights):
# Remove unused precomputed rotary freqs
weights = {
k: v for k, v in weights.items() if "self_attn.rotary_emb.inv_freq" not in k
}
if self.args.tie_word_embeddings:
weights.pop("lm_head.weight", None)
return weights
@property
def layers(self):
return self.model.layers
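BitLinear (imported from bitlinear_layers, which is not shown here) stores four ternary weights per uint8, each in a 2-bit field offset by +1; the Metal kernel in the Swift port below decodes them with (w & 3) - 1, ((w >> 2) & 3) - 1, and so on, writing field i to output row row_idx + i * (out_features / 4). A minimal sketch of that packing convention, assuming out_features is divisible by 4 (the helper names are hypothetical, not library APIs):

import mlx.core as mx

def pack_ternary(w):
    # w: (out_features, in_features) with values in {-1, 0, 1}
    out_features = w.shape[0]
    w = (w + 1).astype(mx.uint8).reshape(4, out_features // 4, -1)
    packed = w[0]
    for k in (1, 2, 3):
        packed = mx.bitwise_or(packed, mx.left_shift(w[k], 2 * k))
    return packed.astype(mx.uint8)  # (out_features // 4, in_features)

def unpack_ternary(packed):
    fields = [mx.bitwise_and(mx.right_shift(packed, 2 * k), 3) for k in range(4)]
    return mx.concatenate(fields, axis=0).astype(mx.int32) - 1

w = mx.random.randint(-1, 2, (8, 16))  # toy ternary weight matrix
assert mx.array_equal(unpack_ternary(pack_ternary(w)), w).item()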
|
//
// Bitnet.swift
// mlx-swift-examples
//
// Created by John Mai on 2025/6/12.
//
import Foundation
import MLX
import MLXFast
import MLXLMCommon
import MLXNN
import Tokenizers
// port of https://github.com/ml-explore/mlx-lm/blob/main/mlx_lm/models/bitnet.py
private func makeBitLinearKernel() -> MLXFast.MLXFastKernel {
let source = """
constexpr int M = 4;
constexpr int BLOCK = 32;
uint tid = thread_position_in_grid.y;
uint in_offset = thread_position_in_grid.x;
uint batch_idx = tid / (out_features / 4);
uint row_idx = tid % (out_features / 4);
float sum[4] = {0.0};
for (uint i = in_offset * M; i < in_features; i += BLOCK * M) {
float v[M];
for (int j=0; j<M; j++) {
v[j] = x[batch_idx * in_features + i + j];
}
for (int j=0; j<M; j++) {
uint8_t w = packed_weights[row_idx * in_features + i + j];
sum[0] += v[j] * ((w & 3) - 1);
sum[1] += v[j] * (((w >> 2) & 3) - 1);
sum[2] += v[j] * (((w >> 4) & 3) - 1);
sum[3] += v[j] * (((w >> 6) & 3) - 1);
}
}
for (int j=0; j<4; j++) {
sum[j] = simd_sum(sum[j]);
}
// Apply the weight scale, either dividing by it or multiplying by it
// depending on invert_weight_scales
if (in_offset == 0) {
float scale = invert_weight_scales ? 1 / weight_scale[0] : weight_scale[0];
for (int i=0; i<4; i++) {
out[batch_idx * out_features + row_idx + i * (out_features/4)] = static_cast<T>(sum[i] * scale);
}
}
"""
return metalKernel(
name: "bitlinear_matmul",
inputNames: ["x", "packed_weights", "weight_scale"],
outputNames: ["out"],
source: source
)
}
private final class BitLinearKernelManager: @unchecked Sendable {
static let shared = BitLinearKernelManager()
let bitlinearKernel: MLXFast.MLXFastKernel
private init() {
bitlinearKernel = makeBitLinearKernel()
}
}
private class BitLinear: Module {
let inFeatures: Int
let outFeatures: Int
let invertWeightScales: Bool
let weight: MLXArray
let bias: MLXArray?
@ModuleInfo(key: "weight_scale") var weightScale: MLXArray
init(
_ inFeatures: Int,
_ outFeatures: Int,
bias: Bool = true,
invertWeightScales: Bool = false
) {
self.inFeatures = inFeatures
self.outFeatures = outFeatures
let packedOutFeatures = Int(floor(Double(outFeatures + 3) / 4.0))
self.weight = MLXArray.zeros([packedOutFeatures, inFeatures], dtype: .uint8)
self.invertWeightScales = invertWeightScales
self._weightScale.wrappedValue = MLXArray([1.0])
if bias {
self.bias = MLXArray.zeros([outFeatures])
} else {
self.bias = nil
}
super.init()
}
private func executeMatmulKernel(_ x: MLXArray, _ packedWeights: MLXArray) -> MLXArray {
let originalShape = x.shape
var x = x
if originalShape.count > 2 {
x = x.reshaped(-1, originalShape[originalShape.count - 1])
}
let totalBatchElements = x.dim(0)
let inFeatures = x.dim(1)
let outFeatures = self.outFeatures
let dtype = self.weightScale.dtype
assert(x.dtype == dtype, "Wrong type for input.")
var outputs = BitLinearKernelManager.shared.bitlinearKernel(
[x, packedWeights, weightScale],
template: [
("T", dtype),
("invert_weight_scales", invertWeightScales),
("in_features", inFeatures),
("out_features", outFeatures),
],
grid: (32, Int(floor(Double(totalBatchElements * outFeatures / 4))), 1),
threadGroup: (32, 1, 1),
outputShapes: [[totalBatchElements, outFeatures]],
outputDTypes: [dtype]
)[0]
if originalShape.count > 2 {
outputs = outputs.reshaped(Array(originalShape.dropLast()) + [outFeatures])
}
return outputs
}
func callAsFunction(_ x: MLXArray) -> MLXArray {
var y = executeMatmulKernel(x, weight)
if let bias {
y = y + bias
}
return y
}
}
// MARK: - Model Configuration
public struct BitnetConfiguration: Codable, Sendable {
var modelType: String
var hiddenSize: Int
var hiddenLayers: Int
var intermediateSize: Int
var attentionHeads: Int
var rmsNormEps: Float
var vocabularySize: Int
var headDimensions: Int?
var maxPositionEmbeddings: Int?
var kvHeads: Int?
var attentionBias: Bool
var mlpBias: Bool
var ropeTheta: Float
var ropeTraditional: Bool
var ropeScaling: [String: StringOrNumber]?
var tieWordEmbeddings: Bool
public init(
modelType: String = "bitnet",
hiddenSize: Int,
hiddenLayers: Int,
intermediateSize: Int,
attentionHeads: Int,
rmsNormEps: Float,
vocabularySize: Int,
headDimensions: Int? = nil,
maxPositionEmbeddings: Int? = nil,
kvHeads: Int? = nil,
attentionBias: Bool = false,
mlpBias: Bool = false,
ropeTheta: Float = 10000,
ropeTraditional: Bool = false,
ropeScaling: [String: StringOrNumber]? = nil,
tieWordEmbeddings: Bool = true
) {
self.modelType = modelType
self.hiddenSize = hiddenSize
self.hiddenLayers = hiddenLayers
self.intermediateSize = intermediateSize
self.attentionHeads = attentionHeads
self.rmsNormEps = rmsNormEps
self.vocabularySize = vocabularySize
self.headDimensions = headDimensions
self.maxPositionEmbeddings = maxPositionEmbeddings
self.kvHeads = kvHeads ?? attentionHeads
self.attentionBias = attentionBias
self.mlpBias = mlpBias
self.ropeTheta = ropeTheta
self.ropeTraditional = ropeTraditional
self.ropeScaling = ropeScaling
self.tieWordEmbeddings = tieWordEmbeddings
}
var resolvedKvHeads: Int {
kvHeads ?? attentionHeads
}
var resolvedHeadDimensions: Int {
headDimensions ?? (hiddenSize / attentionHeads)
}
enum CodingKeys: String, CodingKey {
case modelType = "model_type"
case hiddenSize = "hidden_size"
case hiddenLayers = "num_hidden_layers"
case intermediateSize = "intermediate_size"
case attentionHeads = "num_attention_heads"
case rmsNormEps = "rms_norm_eps"
case vocabularySize = "vocab_size"
case headDimensions = "head_dim"
case maxPositionEmbeddings = "max_position_embeddings"
case kvHeads = "num_key_value_heads"
case attentionBias = "attention_bias"
case mlpBias = "mlp_bias"
case ropeTheta = "rope_theta"
case ropeTraditional = "rope_traditional"
case ropeScaling = "rope_scaling"
case tieWordEmbeddings = "tie_word_embeddings"
}
public init(from decoder: Swift.Decoder) throws {
let container = try decoder.container(keyedBy: CodingKeys.self)
modelType = try container.decodeIfPresent(String.self, forKey: .modelType) ?? "bitnet"
hiddenSize = try container.decode(Int.self, forKey: .hiddenSize)
hiddenLayers = try container.decode(Int.self, forKey: .hiddenLayers)
intermediateSize = try container.decode(Int.self, forKey: .intermediateSize)
attentionHeads = try container.decode(Int.self, forKey: .attentionHeads)
rmsNormEps = try container.decode(Float.self, forKey: .rmsNormEps)
vocabularySize = try container.decode(Int.self, forKey: .vocabularySize)
headDimensions = try container.decodeIfPresent(Int.self, forKey: .headDimensions)
maxPositionEmbeddings = try container.decodeIfPresent(
Int.self, forKey: .maxPositionEmbeddings
)
kvHeads = try container.decodeIfPresent(Int.self, forKey: .kvHeads) ?? attentionHeads
attentionBias = try container.decodeIfPresent(Bool.self, forKey: .attentionBias) ?? false
mlpBias = try container.decodeIfPresent(Bool.self, forKey: .mlpBias) ?? false
ropeTheta = try container.decodeIfPresent(Float.self, forKey: .ropeTheta) ?? 10000
ropeTraditional =
try container.decodeIfPresent(Bool.self, forKey: .ropeTraditional) ?? false
ropeScaling = try container.decodeIfPresent(
[String: StringOrNumber].self, forKey: .ropeScaling
)
tieWordEmbeddings =
try container.decodeIfPresent(Bool.self, forKey: .tieWordEmbeddings) ?? true
}
}
// MARK: - Attention
private class Attention: Module {
let args: BitnetConfiguration
let scale: Float
@ModuleInfo(key: "q_proj") var qProj: BitLinear
@ModuleInfo(key: "k_proj") var kProj: BitLinear
@ModuleInfo(key: "v_proj") var vProj: BitLinear
@ModuleInfo(key: "o_proj") var oProj: BitLinear
@ModuleInfo(key: "attn_sub_norm") var attnSubNorm: RMSNorm
let rope: RoPE
init(_ args: BitnetConfiguration) {
self.args = args
let dim = args.hiddenSize
let headDim = args.resolvedHeadDimensions
let nHeads = args.attentionHeads
let nKvHeads = args.resolvedKvHeads
scale = pow(Float(headDim), -0.5)
_qProj.wrappedValue = BitLinear(dim, nHeads * headDim, bias: args.attentionBias)
_kProj.wrappedValue = BitLinear(dim, nKvHeads * headDim, bias: args.attentionBias)
_vProj.wrappedValue = BitLinear(dim, nKvHeads * headDim, bias: args.attentionBias)
_oProj.wrappedValue = BitLinear(nHeads * headDim, dim, bias: args.attentionBias)
_attnSubNorm.wrappedValue = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps)
let ropeScale: Float
if let ropeScaling = args.ropeScaling, ropeScaling["type"] == .string("linear"),
let factor = ropeScaling["factor"]
{
if let v = factor.asFloat() {
ropeScale = 1 / v
} else {
fatalError("ropeScaling.factor must be a float")
}
} else {
ropeScale = 1
}
rope = RoPE(
dimensions: headDim, traditional: args.ropeTraditional, base: args.ropeTheta,
scale: ropeScale
)
}
func callAsFunction(
_ x: MLXArray,
mask: MLXFast.ScaledDotProductAttentionMaskMode,
cache: KVCache?
) -> MLXArray {
let (B, L) = (x.dim(0), x.dim(1))
var queries = qProj(x)
var keys = kProj(x)
var values = vProj(x)
queries = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3)
keys = keys.reshaped(B, L, args.resolvedKvHeads, -1).transposed(0, 2, 1, 3)
values = values.reshaped(B, L, args.resolvedKvHeads, -1).transposed(0, 2, 1, 3)
if let cache {
queries = rope(queries, offset: cache.offset)
keys = rope(keys, offset: cache.offset)
(keys, values) = cache.update(keys: keys, values: values)
} else {
queries = rope(queries)
keys = rope(keys)
}
let output = MLXFast.scaledDotProductAttention(
queries: queries,
keys: keys,
values: values,
scale: scale,
mask: mask
)
.transposed(0, 2, 1, 3)
.reshaped(B, L, -1)
let normedOutput = attnSubNorm(output)
return oProj(normedOutput)
}
}
// MARK: - MLP
private class MLP: Module {
@ModuleInfo(key: "gate_proj") var gateProj: BitLinear
@ModuleInfo(key: "down_proj") var downProj: BitLinear
@ModuleInfo(key: "up_proj") var upProj: BitLinear
@ModuleInfo(key: "ffn_sub_norm") var ffnSubNorm: RMSNorm
init(_ args: BitnetConfiguration) {
let dim = args.hiddenSize
let hiddenDim = args.intermediateSize
_gateProj.wrappedValue = BitLinear(dim, hiddenDim, bias: args.mlpBias)
_downProj.wrappedValue = BitLinear(hiddenDim, dim, bias: args.mlpBias)
_upProj.wrappedValue = BitLinear(dim, hiddenDim, bias: args.mlpBias)
_ffnSubNorm.wrappedValue = RMSNorm(dimensions: args.intermediateSize, eps: args.rmsNormEps)
}
func callAsFunction(_ x: MLXArray) -> MLXArray {
let gated = reluSquared(gateProj(x)) * upProj(x)
let normed = ffnSubNorm(gated)
return downProj(normed)
}
}
// MARK: - Transformer Block
private class TransformerBlock: Module {
@ModuleInfo(key: "self_attn") var attention: Attention
var mlp: MLP
@ModuleInfo(key: "input_layernorm") var inputLayerNorm: RMSNorm
@ModuleInfo(key: "post_attention_layernorm") var postAttentionLayerNorm: RMSNorm
init(_ args: BitnetConfiguration) {
_attention.wrappedValue = Attention(args)
mlp = MLP(args)
_inputLayerNorm.wrappedValue = RMSNorm(
dimensions: args.hiddenSize, eps: args.rmsNormEps
)
_postAttentionLayerNorm.wrappedValue = RMSNorm(
dimensions: args.hiddenSize, eps: args.rmsNormEps
)
}
func callAsFunction(
_ x: MLXArray,
mask: MLXFast.ScaledDotProductAttentionMaskMode,
cache: KVCache?
) -> MLXArray {
var r = attention(inputLayerNorm(x), mask: mask, cache: cache)
let h = x + r
r = mlp(postAttentionLayerNorm(h))
let out = h + r
return out
}
}
// MARK: - Bitnet Model Inner
private class BitnetModelInner: Module {
@ModuleInfo(key: "embed_tokens") var embedTokens: Embedding
fileprivate let layers: [TransformerBlock]
var norm: RMSNorm
init(_ args: BitnetConfiguration) {
precondition(args.vocabularySize > 0)
_embedTokens.wrappedValue = Embedding(
embeddingCount: args.vocabularySize, dimensions: args.hiddenSize
)
layers = (0 ..< args.hiddenLayers).map { _ in
TransformerBlock(args)
}
norm = RMSNorm(dimensions: args.hiddenSize, eps: args.rmsNormEps)
}
func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray {
var h = embedTokens(inputs)
let mask = createAttentionMask(h: h, cache: cache)
for (i, layer) in layers.enumerated() {
h = layer(h, mask: mask, cache: cache?[i])
}
return norm(h)
}
}
// MARK: - Bitnet Model
public class BitnetModel: Module, LLMModel, KVCacheDimensionProvider {
public let vocabularySize: Int
public let kvHeads: [Int]
fileprivate let model: BitnetModelInner
let configuration: BitnetConfiguration
@ModuleInfo(key: "lm_head") var lmHead: Linear?
public init(_ args: BitnetConfiguration) {
configuration = args
vocabularySize = args.vocabularySize
kvHeads = (0 ..< args.hiddenLayers).map { _ in args.resolvedKvHeads }
model = BitnetModelInner(args)
if !args.tieWordEmbeddings {
_lmHead.wrappedValue = Linear(args.hiddenSize, args.vocabularySize, bias: false)
}
}
public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray {
let out = model(inputs, cache: cache)
if let lmHead {
return lmHead(out)
} else {
return model.embedTokens.asLinear(out)
}
}
public func sanitize(weights: [String: MLXArray]) -> [String: MLXArray] {
var weights = weights
weights = weights.filter {
!$0.key.contains("self_attn.rotary_emb.inv_freq")
}
if configuration.tieWordEmbeddings {
weights["lm_head.weight"] = nil
}
return weights
}
}
extension BitnetModel: LoRAModel {
public func loraLinearLayers() -> LoRALinearLayers {
model.layers.map { ($0.attention, ["q_proj", "v_proj"]) }
}
}
|
LM
|
Cohere
|
# Copyright © 2023-2024 Apple Inc.
from dataclasses import dataclass
from typing import Any, Optional
import mlx.core as mx
import mlx.nn as nn
from .base import BaseModelArgs, create_attention_mask, scaled_dot_product_attention
@dataclass
class ModelArgs(BaseModelArgs):
model_type: str
hidden_size: int = 8192
num_hidden_layers: int = 40
intermediate_size: int = 22528
num_attention_heads: int = 64
num_key_value_heads: int = 64
rope_theta: float = 8000000.0
vocab_size: int = 256000
layer_norm_eps: float = 1e-05
logit_scale: float = 0.0625
attention_bias: bool = False
layer_norm_bias: bool = False
use_qk_norm: bool = False
class LayerNorm2D(nn.Module):
def __init__(self, d1, d2, eps):
super().__init__()
self.weight = mx.zeros((d1, d2))
self.eps = eps
def __call__(self, x):
return self.weight * mx.fast.layer_norm(x, None, None, self.eps)
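# Note: mx.fast.layer_norm(x, None, None, eps) normalizes over the last axis with
# no learned affine parameters; LayerNorm2D then applies a separate (d1, d2) scale.
# With d1 = n_heads and d2 = head_dim this gives the per-head QK-norm used below
# (the zero init is only a placeholder; the scales are loaded from the checkpoint).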
class Attention(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
self.args = args
dim = args.hidden_size
self.n_heads = n_heads = args.num_attention_heads
self.n_kv_heads = n_kv_heads = args.num_key_value_heads
head_dim = args.hidden_size // args.num_attention_heads
self.scale = head_dim**-0.5
attention_bias = args.attention_bias
self.q_proj = nn.Linear(dim, n_heads * head_dim, bias=attention_bias)
self.k_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=attention_bias)
self.v_proj = nn.Linear(dim, n_kv_heads * head_dim, bias=attention_bias)
self.o_proj = nn.Linear(n_heads * head_dim, dim, bias=attention_bias)
self.use_qk_norm = args.use_qk_norm
if self.use_qk_norm:
self.q_norm = LayerNorm2D(self.n_heads, head_dim, eps=args.layer_norm_eps)
self.k_norm = LayerNorm2D(
self.n_kv_heads, head_dim, eps=args.layer_norm_eps
)
self.rope = nn.RoPE(head_dim, traditional=True, base=args.rope_theta)
def __call__(
self,
x: mx.array,
mask: Optional[mx.array] = None,
cache: Optional[Any] = None,
) -> mx.array:
B, L, D = x.shape
queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x)
queries = queries.reshape(B, L, self.n_heads, -1)
keys = keys.reshape(B, L, self.n_kv_heads, -1)
if self.use_qk_norm:
queries = self.q_norm(queries)
keys = self.k_norm(keys)
queries = queries.transpose(0, 2, 1, 3)
keys = keys.transpose(0, 2, 1, 3)
values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3)
if cache is not None:
queries = self.rope(queries, offset=cache.offset)
keys = self.rope(keys, offset=cache.offset)
keys, values = cache.update_and_fetch(keys, values)
else:
queries = self.rope(queries)
keys = self.rope(keys)
output = scaled_dot_product_attention(
queries, keys, values, cache=cache, scale=self.scale, mask=mask
)
output = output.transpose(0, 2, 1, 3).reshape(B, L, -1)
return self.o_proj(output)
class MLP(nn.Module):
def __init__(self, dim, hidden_dim):
super().__init__()
self.gate_proj = nn.Linear(dim, hidden_dim, bias=False)
self.up_proj = nn.Linear(dim, hidden_dim, bias=False)
self.down_proj = nn.Linear(hidden_dim, dim, bias=False)
def __call__(self, x):
return self.down_proj(nn.silu(self.gate_proj(x)) * self.up_proj(x))
class TransformerBlock(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
self.hidden_size = args.hidden_size
self.n_heads = args.num_attention_heads
self.self_attn = Attention(args)
self.mlp = MLP(args.hidden_size, args.intermediate_size)
self.input_layernorm = nn.LayerNorm(
args.hidden_size, eps=args.layer_norm_eps, bias=args.layer_norm_bias
)
self.args = args
def __call__(
self,
x: mx.array,
mask: Optional[mx.array] = None,
cache: Optional[Any] = None,
) -> mx.array:
h = self.input_layernorm(x)
attn_h = self.self_attn(h, mask, cache)
ff_h = self.mlp(h)
return attn_h + ff_h + x
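# Note: this is a parallel residual block -- the attention and MLP branches both
# read the same normalized input h and are added to the residual stream in a
# single step, rather than being applied sequentially.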
class CohereModel(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
self.args = args
self.vocab_size = args.vocab_size
self.num_hidden_layers = args.num_hidden_layers
assert self.vocab_size > 0
self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size)
self.layers = [
TransformerBlock(args=args) for _ in range(args.num_hidden_layers)
]
self.norm = nn.LayerNorm(
args.hidden_size, eps=args.layer_norm_eps, bias=args.layer_norm_bias
)
def __call__(
self,
inputs: mx.array,
mask: mx.array = None,
cache=None,
):
h = self.embed_tokens(inputs)
if mask is None:
mask = create_attention_mask(h, cache)
if cache is None:
cache = [None] * len(self.layers)
for layer, c in zip(self.layers, cache):
h = layer(h, mask, c)
return self.norm(h)
class Model(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
self.model_type = args.model_type
self.model = CohereModel(args)
self.args = args
def __call__(
self,
inputs: mx.array,
mask: mx.array = None,
cache=None,
):
out = self.model(inputs, mask, cache)
out = self.model.embed_tokens.as_linear(out)
out = out * self.model.args.logit_scale
return out
@property
def layers(self):
return self.model.layers
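The output head is the transposed token embedding scaled by logit_scale (0.0625 by default). A small standalone check of that equivalence (toy sizes, not part of the model code):

import mlx.core as mx
import mlx.nn as nn

emb = nn.Embedding(10, 4)           # toy vocabulary of 10, hidden size 4
h = mx.random.normal((2, 3, 4))     # (batch, length, hidden)
logits = emb.as_linear(h) * 0.0625  # what Model.__call__ does above
assert mx.allclose(logits, (h @ emb.weight.T) * 0.0625).item()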
|
import Foundation
import MLX
import MLXFast
import MLXLMCommon
import MLXNN
// port of https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/models/cohere.py
private class Attention: Module {
let args: CohereConfiguration
let scale: Float
@ModuleInfo(key: "q_proj") var wq: Linear
@ModuleInfo(key: "k_proj") var wk: Linear
@ModuleInfo(key: "v_proj") var wv: Linear
@ModuleInfo(key: "o_proj") var wo: Linear
let rope: RoPE
public init(_ args: CohereConfiguration) {
self.args = args
let dim = args.hiddenSize
let heads = args.attentionHeads
let kvHeads = args.kvHeads
let headDim = args.hiddenSize / heads
self.scale = pow(Float(headDim), -0.5)
self._wq.wrappedValue = Linear(dim, heads * headDim, bias: false)
self._wk.wrappedValue = Linear(dim, kvHeads * headDim, bias: false)
self._wv.wrappedValue = Linear(dim, kvHeads * headDim, bias: false)
self._wo.wrappedValue = Linear(heads * headDim, dim, bias: false)
self.rope = RoPE(
dimensions: headDim, traditional: args.ropeTraditional, base: args.ropeTheta)
}
public func callAsFunction(
_ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache?
) -> MLXArray {
let (B, L) = (x.dim(0), x.dim(1))
var queries = wq(x)
var keys = wk(x)
var values = wv(x)
// prepare the queries, keys and values for the attention computation
queries = queries.reshaped(B, L, args.attentionHeads, -1).transposed(0, 2, 1, 3)
keys = keys.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3)
values = values.reshaped(B, L, args.kvHeads, -1).transposed(0, 2, 1, 3)
if let cache {
queries = rope(queries, offset: cache.offset)
keys = rope(keys, offset: cache.offset)
} else {
queries = rope(queries)
keys = rope(keys)
}
let output = attentionWithCacheUpdate(
queries: queries,
keys: keys,
values: values,
cache: cache,
scale: scale,
mask: mask
)
.transposed(0, 2, 1, 3)
.reshaped(B, L, -1)
return wo(output)
}
}
private class MLP: Module, UnaryLayer {
@ModuleInfo(key: "gate_proj") var gate: Linear
@ModuleInfo(key: "down_proj") var down: Linear
@ModuleInfo(key: "up_proj") var up: Linear
public init(dimensions: Int, hiddenDimensions: Int) {
self._gate.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false)
self._up.wrappedValue = Linear(dimensions, hiddenDimensions, bias: false)
self._down.wrappedValue = Linear(hiddenDimensions, dimensions, bias: false)
}
public func callAsFunction(_ x: MLXArray) -> MLXArray {
down(silu(gate(x)) * up(x))
}
}
private class TransformerBlock: Module {
@ModuleInfo(key: "self_attn") var attention: Attention
let mlp: MLP
@ModuleInfo(key: "input_layernorm") var inputLayerNorm: LayerNorm
public init(_ args: CohereConfiguration) {
self._attention.wrappedValue = Attention(args)
self.mlp = MLP(dimensions: args.hiddenSize, hiddenDimensions: args.intermediateSize)
self._inputLayerNorm.wrappedValue = LayerNorm(
dimensions: args.hiddenSize, eps: args.layerNormEps)
}
public func callAsFunction(
_ x: MLXArray, mask: MLXFast.ScaledDotProductAttentionMaskMode, cache: KVCache?
) -> MLXArray {
let h = inputLayerNorm(x)
let attnH = attention(h, mask: mask, cache: cache)
let ffH = mlp(h)
return attnH + ffH + x
}
}
public class CohereModelInner: Module {
@ModuleInfo(key: "embed_tokens") var embedTokens: Embedding
fileprivate let layers: [TransformerBlock]
let norm: LayerNorm
public init(_ args: CohereConfiguration) {
precondition(args.vocabularySize > 0)
self._embedTokens.wrappedValue = Embedding(
embeddingCount: args.vocabularySize, dimensions: args.hiddenSize)
self.layers = (0 ..< args.hiddenLayers)
.map { _ in
TransformerBlock(args)
}
self.norm = LayerNorm(dimensions: args.hiddenSize, eps: args.layerNormEps)
}
public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]? = nil) -> MLXArray {
var h = embedTokens(inputs)
let mask = createAttentionMask(h: h, cache: cache)
for (i, layer) in layers.enumerated() {
h = layer(h, mask: mask, cache: cache?[i])
}
return norm(h)
}
}
public class CohereModel: Module, LLMModel, KVCacheDimensionProvider {
public let vocabularySize: Int
public let kvHeads: [Int]
let model: CohereModelInner
let logitScale: Float
public init(_ args: CohereConfiguration) {
self.vocabularySize = args.vocabularySize
self.kvHeads = (0 ..< args.hiddenLayers).map { _ in args.kvHeads }
self.model = CohereModelInner(args)
self.logitScale = args.logitScale
}
public func callAsFunction(_ inputs: MLXArray, cache: [KVCache]?) -> MLXArray {
var out = model(inputs, cache: cache)
out = model.embedTokens.asLinear(out)
out = out * self.logitScale
return out
}
}
public struct CohereConfiguration: Codable, Sendable {
var hiddenSize: Int
var hiddenLayers: Int
var intermediateSize: Int
var attentionHeads: Int
var layerNormEps: Float
var vocabularySize: Int
var kvHeads: Int
var ropeTheta: Float = 8000000.0
var ropeTraditional: Bool = true
var ropeScaling: [String: StringOrNumber]? = nil
var logitScale: Float
enum CodingKeys: String, CodingKey {
case hiddenSize = "hidden_size"
case hiddenLayers = "num_hidden_layers"
case intermediateSize = "intermediate_size"
case attentionHeads = "num_attention_heads"
case kvHeads = "num_key_value_heads"
case ropeTheta = "rope_theta"
case vocabularySize = "vocab_size"
case layerNormEps = "layer_norm_eps"
case logitScale = "logit_scale"
case ropeTraditional = "rope_traditional"
case ropeScaling = "rope_scaling"
}
public init(from decoder: Decoder) throws {
// custom implementation to handle optional keys with required values
let container: KeyedDecodingContainer<CohereConfiguration.CodingKeys> =
try decoder.container(
keyedBy: CohereConfiguration.CodingKeys.self)
self.hiddenSize = try container.decode(
Int.self, forKey: CohereConfiguration.CodingKeys.hiddenSize)
self.hiddenLayers = try container.decode(
Int.self, forKey: CohereConfiguration.CodingKeys.hiddenLayers)
self.intermediateSize = try container.decode(
Int.self, forKey: CohereConfiguration.CodingKeys.intermediateSize)
self.attentionHeads = try container.decode(
Int.self, forKey: CohereConfiguration.CodingKeys.attentionHeads)
self.layerNormEps = try container.decode(
Float.self, forKey: CohereConfiguration.CodingKeys.layerNormEps)
self.vocabularySize = try container.decode(
Int.self, forKey: CohereConfiguration.CodingKeys.vocabularySize)
self.kvHeads = try container.decode(
Int.self, forKey: CohereConfiguration.CodingKeys.kvHeads)
self.ropeTheta =
try container.decodeIfPresent(
Float.self, forKey: CohereConfiguration.CodingKeys.ropeTheta)
?? 8000000.0
self.ropeScaling = try container.decodeIfPresent(
[String: StringOrNumber].self, forKey: CohereConfiguration.CodingKeys.ropeScaling)
self.logitScale = try container.decode(
Float.self, forKey: CohereConfiguration.CodingKeys.logitScale)
}
}
// MARK: - LoRA
extension CohereModel: LoRAModel {
public func loraLinearLayers() -> LoRALinearLayers {
model.layers.map { ($0.attention, ["q_proj", "v_proj"]) }
}
}
|