Food Isotherms via Statistical Thermodynamics
Quick Start
Food/water isotherms are often described and interpreted by models such as BET or GAB that themselves contain assumptions that are often invalid. Here we implement the stat-therm, universal, ABC isotherm that can analyse raw data directly or convert from many of the common isotherms that others choose to use. There is a more general version for those interested in a wide range of Isotherms via Stat Therm.
Credits
The app accompanies a forthcoming (2025) book chapter from Abbott, Harton, Matubayasi and Shimizu1 which is based on core papers by Shimizu and Matubayasi2-4.
Food Isotherms
//One universal basic required here to get things going once loaded
"use strict"
//Wire up the UI once the page is ready
window.onload = function () {
//restoreDefaultValues(); //Un-comment this if you want to start with defaults
//'Load' needs both handlers: the click clears the stored filename so choosing
//the same file again still fires the 'change' event (see clearOldName)
document.getElementById('Load').addEventListener('click', clearOldName, false);
document.getElementById('Load').addEventListener('change', handleFileSelect, false);
//'Reverse' converts the typed model parameters back into slider A,B,C values
document.getElementById('Reverse').addEventListener("click", DoModel)
// document.getElementById('Parameters').addEventListener('change', DoModel, false);
Main();
};
//Any global variables go here
//amUpdating: re-entrancy guard while sliders are being set programmatically
//lastFit/lastQ/lastUnits/lastProbe/lastT: cache of the last fit inputs so a refit only happens on change
//newFit: set when a fresh data file has been loaded; divBy: scale factor applied to loaded data so fitting works on ~0-1 values
//ModelLabel: heading text for the parameters box; G22conv: unit conversion applied to the G22 outputs
let amUpdating = false, lastFit = "", newFit = false, lastQ = 0, lastUnits = "", lastProbe = "", lastT = 0, divBy = 100, ModelLabel = "Model Parameters", G22conv = 1
//fitData: loaded isotherm points (y scaled by divBy); naFitData: n/a form of the same; scaleData: raw as-loaded points
//Loading: true while a file read is in flight; oldParams: last Parameters text we wrote, to detect user edits
let fitData = [], naFitData = [], scaleData = [], Loading = false, oldParams = ""
//Main is hard wired as THE place to start calculating when input changes.
//It does no calculations itself: it gathers the inputs, hands them to CalcIt,
//then writes the returned text and plots back into the page.
function Main() {
  if (amUpdating) return
  saveSettings();
  if (Loading) return
  const modelEl = document.getElementById('Model')
  //Send all the inputs as a structured object
  //If you need to convert to, say, SI units, do it here!
  const inputs = {
    A: sliders.SlideA.value,
    B: sliders.SlideB.value,
    C: sliders.SlideC.value,
    amax: sliders.Slideamax.value,
    Q: sliders.SlideQ.value,
    Probe: "Water; 18; 11; 8.071; 1729.8; 233.4",
    T: 25,
    Units: "g/100g",
    Model: modelEl.value,
  }
  //Time the calculation only for the (slow) file-fitting models
  const timeCalc = !amUpdating && modelEl.value.includes("File")
  if (timeCalc) console.time("Calc")
  //Send inputs off to CalcIt where the names are instantly available
  //and get all the responses back as a single object
  const result = CalcIt(inputs)
  if (timeCalc) console.timeEnd("Calc")
  //Set all the text box outputs
  document.getElementById('Parameters').value = result.Parameters
  document.getElementById('Values').value = result.Values
  document.getElementById('ModelLabel').innerHTML = result.ModelLabel
  //Remember what we wrote so CalcIt can detect a user hand-edit next time
  oldParams = document.getElementById('Parameters').value
  //Do all relevant plots by calling plotIt - if there's no plot, nothing happens
  //plotIt is part of the app infrastructure in app.new.js
  if (result.plots) {
    result.plots.forEach(function (plotDef, idx) {
      plotIt(plotDef, result.canvas[idx]);
    })
  }
  //You might have some other stuff to do here, but for most apps that's it for CalcIt!
}
//Convert the parameters the user typed (or a previous fit produced) for the
//chosen model into the equivalent universal A,B,C slider settings, then
//recalculate. Triggered by the 'Reverse' button.
//Reads the Model select and the free-text Parameters box; numeric tokens are
//extracted by gettmp() so labels like "nm:" are ignored.
function DoModel() {
  const theModel = document.getElementById('Model').value
  const params = document.getElementById('Parameters').value
  const vals = params.split(/:| |,/)
  let A, B, C, tmp = []
  if (theModel == "ABC") {
    tmp = gettmp(vals)
    if (tmp.length == 3) {
      A = tmp[0]
      B = tmp[1]
      C = tmp[2]
      //The sliders hold the x100 (divBy) scaled values
      //NOTE(review): only this branch multiplies by divBy and only SlideA uses
      //.value (the others .inputValue) - confirm that asymmetry is intentional
      amUpdating = true
      sliders.SlideA.value = A * divBy
      sliders.SlideB.inputValue = B * divBy
      sliders.SlideC.inputValue = C * divBy
      amUpdating = false
    }
  }
  //GAB and BET share the same nm, CB, K -> A, B, C conversion
  //(previously two byte-identical branches; merged)
  if (theModel == "GAB" || theModel == "BET") {
    tmp = gettmp(vals)
    if (tmp.length == 3) {
      const nm = tmp[0]
      const CB = tmp[1]
      const K = tmp[2]
      A = 1 / (CB * K * nm); B = (2 - CB) / (CB * nm); C = (2 * K * (CB - 1)) / (CB * nm)
      amUpdating = true
      sliders.SlideA.value = A
      sliders.SlideB.inputValue = B
      sliders.SlideC.inputValue = C
      amUpdating = false
    }
  }
  if (theModel == "Hailwood-Horrobin") {
    tmp = gettmp(vals)
    if (tmp.length == 3) {
      A = tmp[0]
      B = tmp[1]
      C = tmp[2]
      amUpdating = true
      sliders.SlideA.value = A
      sliders.SlideB.value = -B //H-H's B carries the opposite sign convention
      sliders.SlideC.value = 2 * C //and its C is half the universal one
      amUpdating = false
    }
  }
  Main()
}
//Extract the numeric tokens from a split parameter string.
//Empty strings and non-numeric labels (e.g. "nm", "CB") are dropped;
//everything that survives is returned as a float, in order.
function gettmp(vals) {
  return vals
    .filter(function (token) { return token != "" && !isNaN(token) })
    .map(function (token) { return parseFloat(token) })
}
//Here's the app calculation
//The inputs are just the names provided - their order in the curly brackets is unimportant!
//By convention the input values are provided with the correct units within Main
//Returns {Parameters, Values, ModelLabel, plots, canvas} for Main to display,
//or a minimal {Parameters: ...} object on the early-exit paths.
function CalcIt({ A, B, C, Q, amax, Model, Probe, Units, T }) {
  //If the user has hand-edited the Parameters box since we last wrote it,
  //echo their text back unchanged so DoModel can parse it on the next click
  if (oldParams != "" && document.getElementById('Parameters').value != oldParams) {
    oldParams = document.getElementById('Parameters').value
    return { Parameters: document.getElementById('Parameters').value }
  }
  //Fitting effort: higher "quality" Q buys more fminsearch iterations
  let FQ = 2000 + 2000 * Q * Q
  if (document.getElementById('SlideQ').style.visibility == "hidden") FQ = 10000
  //Probe string format: "Name; MWt; Area; AA; AB; AC" where AA/AB/AC are
  //Antoine-style vapour-pressure constants (presumably the mmHg/°C form,
  //given the /760 conversion below - TODO confirm)
  const ProbeData = Probe.split(";")
  const MWt = parseFloat(ProbeData[1])
  const Area = parseFloat(ProbeData[2])
  const AA = parseFloat(ProbeData[3])
  const AB = parseFloat(ProbeData[4])
  const AC = parseFloat(ProbeData[5])
  if (amUpdating) return { Parameters: "-" } //re-entrant call while sliders update: do nothing
  document.getElementById('Load').disabled = !Model.includes("File")
  //Models that are fitted FROM the ABC curve get a read-only Parameters box.
  //FIX: "Smith" was missing from this list although it is a fitted model
  //(see the Smith fit and the dualPlot list below).
  if (Model == "Peleg" || Model == "Freundlich" || Model == "Fractal-FHH" || Model == "Oswin" || Model == "Smith" || Model == "Dubinin-Radushkevich" || Model == "Halsey" || Model == "Henderson" || Model == "Bradley") {
    document.getElementById('Parameters').readOnly = true
  } else {
    amUpdating = true
    if (Model.includes("File")) {
      document.getElementById('Parameters').readOnly = true
    } else {
      document.getElementById('Parameters').readOnly = false
    }
    amUpdating = false
  }
  if (Model.includes("File")) {
    document.getElementById('SlideQ').style.visibility = "visible"
  } else {
    document.getElementById('SlideQ').style.visibility = "hidden"
    Loading = false
  }
  const RT = 8.314 * (T + 273.15)
  //Curve arrays: I = isotherm, G = G22/v, N = N22, P = fitted-model overlay,
  //na = <n2>/a2, an = a2/<n2>. (Unused locals E,F,G,H removed.)
  let ICurve = [], GCurve = [], NCurve = [], anCurve = [], PCurve = [], naCurve = [], G22 = 0, n = 0
  let Parameters = "-", Values = "-", PFunc
  const a2inc = (Model == "Fractal-FHH" || Model == "Freundlich") ? 0.005 : 0.01
  if (!Model.includes("File")) {
    fitData = []; naFitData = []
    clearOldName()
    Loading = false
  }
  ModelLabel = "Model Parameters"
  document.getElementById('Clabel').innerHTML = "C × 100"
  //Handle the Files first
  let xmax = amax - 0.05
  //conv converts the user's chosen amount units to mol/g
  let conv = 1
  if (!Units.includes("mol")) conv /= MWt
  //FIX: bitwise & replaced with logical && (operands are booleans, so the
  //result is unchanged; && is the intended operator)
  if (Units.includes("/g") && !Units.includes("mmol") && !Units.includes("μmol")) conv /= 1000
  //if (Units.includes("μmol")) conv /= 1000
  if (Units.includes("/100g")) conv *= 10
  console.log(Units, conv) //debug readout of the unit conversion
  if (Model.includes("File") && fitData.length > 3) {
    document.getElementById('Slideamax').style.visibility = "hidden"
    let x = [], y = []
    naFitData = []
    for (let i = 0; i < fitData.length; i++) {
      x[i] = fitData[i].x
      y[i] = fitData[i].y
      if (x[i] > 0.025) naFitData.push({ x: fitData[i].x, y: fitData[i].y * divBy / fitData[i].x })
    }
    xmax = parseFloat(x[fitData.length - 1])
    if (amax < xmax) amax = Math.min(1, xmax + 0.05) // Users can choose to have a higher value but not a lower one
    ModelLabel = "Model Parameters kg/mol"
    //Only refit when something relevant has changed since the last fit
    if (newFit || Q != lastQ || Probe != lastProbe || Units != lastUnits || lastFit != Model) {
      lastFit = Model; lastQ = Q; lastUnits = Units; lastProbe = Probe
      amUpdating = true
      if (Model.includes("A-B-C Fit")) {
        //The universal stat-therm isotherm: n = a / (A - B.a - (C/2).a²)
        PFunc = function (x, P) {
          return x.map(function (xi) {
            return xi / (P[0] - P[1] * xi - P[2] / 2 * xi * xi)
          })
        }
        const Parms = fminsearch(PFunc, [1, -5, 5], x, y, {
          maxIter: FQ, noNegA: true
        })
        A = Parms[0]; B = Parms[1]; C = Parms[2];
        if (A < 0) A = 0.001
        A /= conv; B /= conv; C /= conv
        sliders.SlideA.value = A
        sliders.SlideB.value = B
        sliders.SlideC.value = C
      }
      amUpdating = false
      //Put the fitted values on the clipboard (tab separated) and in the box
      let cText = (A / divBy).toPrecision(4) + '\t' + (B / divBy).toPrecision(4)
      if (Math.abs(C) > 0.001) cText += '\t' + (C / divBy).toPrecision(4)
      if (!Model.includes("Type")) copyTextToClipboard(cText)
      let sText = "A: " + (A / divBy).toPrecision(4) + ' B: ' + (B / divBy).toPrecision(4)
      if (Math.abs(C) > 0.001) sText += ' C: ' + (C / divBy).toPrecision(4)
      if (Model.includes("File") && fitData.length > 3) sText += ' -B/C:' + (-B / C).toPrecision(3)
      Parameters = sText
      G22conv = 1 / conv
      newFit = false
    }
  } else {
    document.getElementById('Slideamax').style.visibility = "visible"
    lastFit = ""; divBy = 100
  }
  //Undo conv for plot
  let N22min = 999, nmax = 0, nmin = 999, tmp = 0
  //Saturated vapour molar concentration from the Antoine constants (mol/m³)
  const VPRT = isNaN(AA) ? "-" : Math.pow(10, AA - AB / (AC + T)) / 760 * 100000 / (8.312 * (T + 273.15))
  const Gs2 = divBy / (conv * VPRT * A)
  G22 = conv * B / divBy
  //aM: activity of the isotherm's characteristic point; n2M the <n2> there
  const aM = (-2 * A / B) * (Math.sqrt(1 + (B * B / (2 * A * C))) - 1)
  const n2M = (-1 / B) / (1 + (2 * A * C / (B * B))) * divBy
  const AvoCalc = 6.02 //Avogadro number, scaled appropriately
  const STSA = Area * AvoCalc * n2M //stat-therm surface area, m²/g
  A *= conv; B *= conv; C *= conv
  Values = "Gs2° = " + Gs2.toPrecision(4) + " m³/kg | G22°/v° = " + G22.toPrecision(4) + " kg/mol | STSA = " + STSA.toPrecision(3) + " m²/g | "
  //Build the theory curves across the activity range
  for (let a2 = a2inc; a2 <= Math.min(amax, xmax + 0.05); a2 += a2inc) {
    n = a2 / (A - C * Math.pow(a2, 2) / 2 - B * a2)
    nmin = Math.min(n, nmin)
    nmax = Math.max(n, nmax)
    G22 = C * a2 + B
    tmp = G22conv * G22 * n * divBy / conv
    if (tmp < N22min) {
      N22min = tmp
    }
    if (n > 1 || n < -0.02) break //past the singularity - stop plotting
    ICurve.push({ x: a2, y: Math.max(0, n * divBy) })
    GCurve.push({ x: a2, y: G22conv * G22 / divBy })
    NCurve.push({ x: a2, y: 1 * G22 / 1 * n }) //N22 = G22.n (the 1*s are unit placeholders)
    if (a2 > 0.025) naCurve.push({ x: a2, y: Math.max(0, n * divBy) / a2 })
    if (n * divBy > 0.025) anCurve.push({ x: a2, y: a2 / (n * divBy) })
  }
  //Rounding error issues for the plot
  n = amax / (A - C * Math.pow(amax, 2) / 2 - B * amax)
  if (xmax + 0.05 >= amax && n <= 1 && n > 0) ICurve.push({ x: amax, y: n * divBy })
  //Position the '|' marker along the Multi-layer...Microporous scale from N22min
  const s = Math.min(Math.max(0, Math.round((-0.5 - N22min / divBy) / 0.05)), 10)
  let comment = "Multi-layer <...........> Microporous"
  comment = comment.split('');
  comment[s + 13] = '|';
  comment = comment.join('');
  Values += comment
  //Vertical marker lines at aM for the two plots
  let MCurve = [], MNCurve = []
  MCurve.push({ x: aM, y: 0 }); MCurve.push({ x: aM, y: (nmax + nmin) / 2 * divBy })
  MNCurve.push({ x: aM, y: -1 }); MNCurve.push({ x: aM, y: 0 })
  //Redo conv for params
  A /= conv; B /= conv; C /= conv
  if (Model == "Hailwood-Horrobin") {
    Parameters = "A: " + (A / divBy).toPrecision(3) + ", B: " + -(B / divBy).toPrecision(3) + ", C: " + (C / divBy / 2).toPrecision(3)
  }
  if (Model == "ABC") {
    Parameters = "A: " + (A / divBy).toPrecision(3) + ", B: " + (B / divBy).toPrecision(3) + ", C: " + (C / divBy).toPrecision(3)
  }
  if (Model == "GAB") {
    //Solve a quadratic to get the values
    const ab = C * A, bb = -4 * C * A - 2 * B * B, cb = 4 * C * A + 2 * B * B
    const GC = (-bb + Math.sqrt(bb * bb - 4 * ab * cb)) / (2 * ab)
    const GM = (2 - GC) / (B * GC)
    const GK = B / (A * (2 - GC))
    Parameters = "nm: " + GM.toPrecision(3) + " CB: " + GC.toPrecision(3) + " K: " + GK.toPrecision(3)
  }
  if (Model == "BET") {
    //BET is the special case C = 2(A-B), so hide the C slider and show the link
    document.getElementById('SlideC').style.visibility = "hidden"
    document.getElementById('Clabel').innerHTML = "C = 2(A-B)"
    const BC = 2 * A - 2 * B
    amUpdating = true
    sliders.SlideC.inputValue = BC
    amUpdating = false
    //Solve a quadratic to get the values
    const ab = BC * A, bb = -4 * BC * A - 2 * B * B, cb = 4 * BC * A + 2 * B * B
    const GC = (-bb + Math.sqrt(bb * bb - 4 * ab * cb)) / (2 * ab)
    const GM = (2 - GC) / (B * GC)
    const GK = B / (A * (2 - GC))
    Parameters = "nm:" + GM.toPrecision(3) + " CB: " + GC.toPrecision(3) + " K: " + GK.toPrecision(3)
  }
  //For each "alternative" model, fit its functional form to the ABC curve
  //and overlay the result (PCurve) so the user can compare
  if (Model == "Peleg") {
    PFunc = function (x, P) {
      return x.map(function (xi) {
        return P[0] * Math.pow(xi, P[1]) + P[2] * Math.pow(xi, P[3])
      })
    }
    let x = [], y = []
    for (let i = 0; i < ICurve.length; i++) {
      x[i] = ICurve[i].x
      y[i] = ICurve[i].y
    }
    const Parms = fminsearch(PFunc, [1, 0.8, -1, 0.8], x, y, {
      maxIter: FQ / 2
    })
    Parameters = "K1: " + Parms[0].toPrecision(3) + ", N1: " + Parms[1].toPrecision(3) + ", K2: " + Parms[2].toPrecision(3) + ", N2: " + Parms[3].toPrecision(3)
    for (let i = 0; i < ICurve.length; i++) {
      PCurve.push({ x: x[i], y: PFunc([x[i]], Parms)[0] })
    }
  }
  if (Model == "Fractal-FHH") {
    PFunc = function (x, P) {
      return x.map(function (xi) {
        return P[0] * Math.pow(-RT * Math.log(xi), P[1] - 3)
      })
    }
    let x = [], y = []
    for (let i = 0; i < ICurve.length; i++) {
      if (ICurve[i].x > 0) { //Don't want logs of near-zero
        x.push(ICurve[i].x)
        y.push(ICurve[i].y)
      }
    }
    const Parms = fminsearch(PFunc, [10, 2.5], x, y, {
      maxIter: FQ
    })
    Parameters = "K1: " + Parms[0].toPrecision(3) + ", D: " + Parms[1].toPrecision(3)
    for (let i = 0; i < x.length; i++) {
      PCurve.push({ x: x[i], y: PFunc([x[i]], Parms)[0] })
    }
  }
  if (Model == "Oswin") {
    PFunc = function (x, P) {
      return x.map(function (xi) {
        return P[0] * Math.pow(xi / (1 - xi), P[1])
      })
    }
    let x = [], y = []
    for (let i = 0; i < ICurve.length; i++) {
      if (ICurve[i].x <= 0.95) { //avoid the 1/(1-x) blow-up
        x.push(ICurve[i].x)
        y.push(ICurve[i].y)
      }
    }
    const Parms = fminsearch(PFunc, [1, 1], x, y, {
      maxIter: FQ
    })
    Parameters = "K1: " + Parms[0].toPrecision(3) + ", N: " + Parms[1].toPrecision(3)
    for (let i = 0; i < x.length; i++) {
      PCurve.push({ x: x[i], y: PFunc([x[i]], Parms)[0] })
    }
  }
  if (Model == "Smith") {
    PFunc = function (x, P) {
      return x.map(function (xi) {
        return P[0] - P[1] * Math.log((1 - xi))
      })
    }
    let x = [], y = []
    for (let i = 0; i < ICurve.length; i++) {
      if (ICurve[i].x <= 0.95) {
        x.push(ICurve[i].x)
        y.push(ICurve[i].y)
      }
    }
    const Parms = fminsearch(PFunc, [1, 1], x, y, {
      maxIter: FQ
    })
    Parameters = "K1: " + Parms[0].toPrecision(3) + ", K2: " + Parms[1].toPrecision(3)
    for (let i = 0; i < x.length; i++) {
      PCurve.push({ x: x[i], y: PFunc([x[i]], Parms)[0] })
    }
  }
  if (Model == "Freundlich") {
    PFunc = function (x, P) {
      return x.map(function (xi) {
        return P[0] * Math.pow(xi, 1 / P[1])
      })
    }
    let x = [], y = []
    for (let i = 0; i < ICurve.length; i++) {
      if (ICurve[i].x <= 0.95) {
        x.push(ICurve[i].x)
        y.push(ICurve[i].y)
      }
    }
    const Parms = fminsearch(PFunc, [1, 1], x, y, {
      maxIter: FQ
    })
    Parameters = "K1: " + Parms[0].toPrecision(3) + ", N: " + Parms[1].toPrecision(3)
    for (let i = 0; i < x.length; i++) {
      PCurve.push({ x: x[i], y: PFunc([x[i]], Parms)[0] })
    }
  }
  if (Model == "Dubinin-Radushkevich") {
    PFunc = function (x, P) {
      return x.map(function (xi) {
        //FIX: 'e' was an undeclared (implicit global) variable; under this
        //file's "use strict" that threw a ReferenceError whenever this model ran
        const e = 1 * Math.log(1 + 1 / xi) //Used to be RT, but factors are easier
        return P[0] * Math.exp(-P[1] * e * e)
      })
    }
    let x = [], y = []
    for (let i = 0; i < ICurve.length; i++) {
      if (ICurve[i].x > 0) {
        x.push(ICurve[i].x)
        y.push(ICurve[i].y)
      }
    }
    const Parms = fminsearch(PFunc, [1, 1e-7], x, y, {
      maxIter: FQ
    })
    Parameters = "K1: " + Parms[0].toPrecision(3) + ", K2: " + Parms[1].toPrecision(3)
    for (let i = 0; i < x.length; i++) {
      PCurve.push({ x: x[i], y: PFunc([x[i]], Parms)[0] })
    }
  }
  if (Model == "Halsey") {
    PFunc = function (x, P) {
      return x.map(function (xi) {
        return Math.pow(-P[1] / Math.log(xi), 1 / P[0])
      })
    }
    let x = [], y = []
    for (let i = 0; i < ICurve.length; i++) {
      if (ICurve[i].x > 0) {
        x.push(ICurve[i].x)
        y.push(ICurve[i].y)
      }
    }
    const Parms = fminsearch(PFunc, [1, 1], x, y, {
      maxIter: FQ
    })
    Parameters = "n: " + (Parms[0]).toPrecision(3) + ", k: " + Parms[1].toPrecision(3)
    for (let i = 0; i < x.length; i++) {
      PCurve.push({ x: x[i], y: PFunc([x[i]], Parms)[0] })
    }
  }
  if (Model == "Henderson") {
    PFunc = function (x, P) {
      return x.map(function (xi) {
        return Math.pow(Math.log(1 - xi) / (-P[0]), 1 / P[1])
      })
    }
    let x = [], y = []
    for (let i = 0; i < ICurve.length; i++) {
      if (ICurve[i].x > 0) {
        x.push(ICurve[i].x)
        y.push(ICurve[i].y)
      }
    }
    const Parms = fminsearch(PFunc, [20, 1.1], x, y, {
      maxIter: FQ
    })
    Parameters = "A: " + (Parms[0]).toPrecision(3) + ", B: " + Parms[1].toPrecision(3)
    for (let i = 0; i < x.length; i++) {
      PCurve.push({ x: x[i], y: PFunc([x[i]], Parms)[0] })
    }
  }
  if (Model == "Bradley") {
    PFunc = function (x, P) {
      return x.map(function (xi) {
        //Very easy to get -ve values at low a2
        if (xi < 0.005) { return 0 } else { return 0.01 * Math.log(Math.log(1 / xi) / P[1]) / Math.log(P[0]) }
      })
    }
    let x = [], y = []
    for (let i = 0; i < ICurve.length; i++) {
      if (ICurve[i].x > 0) {
        x.push(ICurve[i].x)
        y.push(ICurve[i].y)
      }
    }
    const Parms = fminsearch(PFunc, [0.5, 5], x, y, {
      maxIter: FQ
    })
    Parameters = "A: " + (Parms[0]).toPrecision(3) + ", B: " + Parms[1].toPrecision(3)
    for (let i = 0; i < x.length; i++) {
      PCurve.push({ x: x[i], y: PFunc([x[i]], Parms)[0] })
    }
  }
  //Default plot setup; overridden below when real file data is loaded
  const dualPlot = (Model == "Peleg" || Model == "Freundlich" || Model == "Fractal-FHH" || Model == "Oswin" || Model == "Smith" || Model == "Dubinin-Radushkevich" || Model == "Halsey" || Model == "Henderson" || Model == "Bradley")
  let plotData = dualPlot ? [PCurve, ICurve, MCurve] : [ICurve, ICurve, MCurve]
  let lineLabels = [Model, "Stat Therm", "M"] //same either way (dead ternary removed)
  let theColors = ['#88aaff', '#edc240', "gray"] //same either way (dead ternary removed)
  let borderWidth = dualPlot ? [2, 4, 1] : [2, 6, 1]
  let thenaColors = ['#edc240', '#88aaff']
  let showLines = [true, true, true], showPoints = [0, 0, 0]
  let naData = [naCurve, anCurve]
  let naLineLabels = ["⟨n2⟩/a2", "a2/⟨n2⟩"]
  let G2y2Label = "a2/⟨n2⟩& "
  let G2yAxisL1R2 = [1, 2]
  if (Model.includes("File") && fitData.length > 3) {
    plotData = [scaleData, ICurve, MCurve]
    lineLabels = ["Raw", "Stat Therm", "M⟩"]
    theColors = ['#6f98ff', '#edc240', "gray"]
    thenaColors = ['#6f98ff', '#edc240', '#88aaff']
    showLines = [false, true, true], showPoints = [2, 0, 0]
    naData = [naFitData, naCurve, anCurve]
    naLineLabels = ["Raw", "⟨n2⟩/a2", "a2/⟨n2⟩"]
    G2yAxisL1R2 = [1, 1, 2]
  }
  //Now set up all the graphing data detail by detail.
  const prmap = {
    plotData: plotData, //An array of 1 or more datasets
    lineLabels: lineLabels, //An array of labels for each dataset
    borderWidth: borderWidth,
    dottedLine: [false, false, true],
    colors: theColors,
    showLines: showLines,
    showPoints: showPoints,
    xLabel: "a2& ", //Label for the x axis, with an & to separate the units
    yLabel: "⟨n2⟩&" + Units, //Label for the y axis, with an & to separate the units
    y2Label: null, //Label for the y2 axis, null if not needed
    yAxisL1R2: [], //Array to say which axis each dataset goes on. Blank=Left=1
    logX: false, //Is the x-axis in log form?
    xTicks: undefined, //We can define a tick function if we're being fancy
    logY: false, //Is the y-axis in log form?
    yTicks: undefined, //We can define a tick function if we're being fancy
    legendPosition: 'top', //Where we want the legend - top, bottom, left, right
    xMinMax: [0,], //Set min and max, e.g. [-10,100], leave one or both blank for auto
    yMinMax: [0,], //Set min and max, e.g. [-10,100], leave one or both blank for auto
    y2MinMax: [,], //Set min and max, e.g. [-10,100], leave one or both blank for auto
    xSigFigs: 'F2', //These are the sig figs for the Tooltip readout. A wide choice!
    ySigFigs: 'F3', //F for Fixed, P for Precision, E for exponential
  };
  let ShowUnits = " "
  if (Model.includes("File") && !Model.includes("Type") && fitData.length > 3) ShowUnits = "kg/mol"
  const prmap1 = {
    plotData: [GCurve, NCurve, MNCurve], //An array of 1 or more datasets
    colors: theColors,
    borderWidth: [3, 3, 1],
    dottedLine: [false, false, true],
    lineLabels: ["G22 / v", "N22", "M"], //An array of labels for each dataset
    xLabel: "a2& ", //Label for the x axis, with an & to separate the units
    yLabel: "G22 / v& kg/mol" + ShowUnits, //Label for the y axis, with an & to separate the units
    y2Label: "N22& ", //Label for the y2 axis, null if not needed
    yAxisL1R2: [1, 2, 2], //Array to say which axis each dataset goes on. Blank=Left=1
    logX: false, //Is the x-axis in log form?
    xTicks: undefined, //We can define a tick function if we're being fancy
    logY: false, //Is the y-axis in log form?
    yTicks: undefined, //We can define a tick function if we're being fancy
    legendPosition: 'top', //Where we want the legend - top, bottom, left, right
    xMinMax: [0,], //Set min and max, e.g. [-10,100], leave one or both blank for auto
    yMinMax: [,], //Set min and max, e.g. [-10,100], leave one or both blank for auto
    y2MinMax: [,], //Set min and max, e.g. [-10,100], leave one or both blank for auto
    xSigFigs: 'P3', //These are the sig figs for the Tooltip readout. A wide choice!
    ySigFigs: 'P3', //F for Fixed, P for Precision, E for exponential
  };
  const prmap2 = {
    plotData: naData, //An array of 1 or more datasets
    lineLabels: naLineLabels, //An array of labels for each dataset
    colors: thenaColors,
    showLines: showLines,
    showPoints: showPoints,
    xLabel: "a2& ", //Label for the x axis, with an & to separate the units
    yLabel: "⟨n2⟩ / a2&" + Units, //Label for the y axis, with an & to separate the units
    y2Label: G2y2Label, //Label for the y2 axis, null if not needed
    yAxisL1R2: G2yAxisL1R2, //Array to say which axis each dataset goes on. Blank=Left=1
    logX: false, //Is the x-axis in log form?
    xTicks: undefined, //We can define a tick function if we're being fancy
    logY: false, //Is the y-axis in log form?
    yTicks: undefined, //We can define a tick function if we're being fancy
    legendPosition: 'top', //Where we want the legend - top, bottom, left, right
    xMinMax: [,], //Set min and max, e.g. [-10,100], leave one or both blank for auto
    yMinMax: [0,], //Set min and max, e.g. [-10,100], leave one or both blank for auto
    y2MinMax: [,], //Set min and max, e.g. [-10,100], leave one or both blank for auto
    xSigFigs: 'P3', //These are the sig figs for the Tooltip readout. A wide choice!
    ySigFigs: 'P3', //F for Fixed, P for Precision, E for exponential
  };
  //Now we return everything - text boxes, plot and the name of the canvas, which is 'canvas' for a single plot
  return {
    Parameters: Parameters,
    Values: Values,
    ModelLabel: ModelLabel,
    plots: [prmap, prmap1, prmap2],
    canvas: ['canvas', 'canvas1', 'canvas2'],
  };
}
//The fitting routine: simple coordinate-descent minimiser.
//Perturbs each parameter in turn, growing its step 1.2x while the objective
//improves and halving+reversing it otherwise.
//fun(x, P) must return the predicted y array for parameter vector P.
//Opt: { maxIter, noNegA (clamp P[0] >= 0.01), step, objFun } - the passed
//Opt object is mutated to fill in defaults (as in the original).
//Returns the best parameter vector found.
const fminsearch = function (fun, Parm0, x, y, Opt) {
  if (!Opt) Opt = {}
  if (!Opt.maxIter) Opt.maxIter = 1000
  if (!Opt.noNegA) Opt.noNegA = false
  if (!Opt.step) { // initial step is 1/100 of initial value (remember not to use zero in Parm0)
    Opt.step = Parm0.map(function (p) {
      const si = p / 100
      return si == 0 ? 1 : si // convert null steps into 1's
    })
  }
  if (!Opt.objFun) { // default objective: sum of squared deviations
    Opt.objFun = function (yObs, yPred) {
      let ssd = 0
      for (let k = 0; k < yObs.length; k++) ssd += Math.pow(yObs[k] - yPred[k], 2)
      return ssd
    }
  }
  const cloneVector = function (V) {
    return V.slice()
  }
  let P0 = cloneVector(Parm0), P1 = cloneVector(Parm0)
  const n = P0.length
  const step = Opt.step
  //Objective as a function of the parameters. NB: with noNegA this clamps
  //P[0] in place, deliberately mutating the vector it is given.
  const funParm = function (P) {
    if (Opt.noNegA) P[0] = Math.max(P[0], 0.01)
    return Opt.objFun(y, fun(x, P))
  }
  let lastfP = 1e-19, newfP = 0
  //Convergence tolerance tightens with the iteration budget.
  //NOTE: for maxIter < 2000, Q (and hence eps) is NaN, so the early-stop test
  //below never fires and the loop always runs to maxIter - kept as-is for
  //compatibility with the original behaviour.
  const Q = Math.sqrt((Opt.maxIter - 2000) / 2000)
  const eps = Math.pow(10, -3 - Q)
  //PERF FIX: cache the objective at the current best point instead of
  //re-evaluating funParm(P0) twice per parameter per iteration.
  let f0 = funParm(P0)
  for (let i = 0; i < Opt.maxIter; i++) {
    for (let j = 0; j < n; j++) { // take a step for each parameter
      P1 = cloneVector(P0)
      P1[j] += step[j]
      //FIXME(review): 'Opt.noNeg' is never set by any caller (they pass
      //'noNegA'), so this guard is currently always true - confirm the intent
      //before "fixing" it, since enforcing it would change the fits.
      if ((!Opt.noNeg || P1[j] > 0) && funParm(P1) < f0) { // if parm value going in the right direction
        step[j] = 1.2 * step[j] // then go a little faster
        P0 = cloneVector(P1)
        f0 = funParm(P0) // re-evaluate once so the cached value reflects any clamping
      } else {
        step[j] = -(0.5 * step[j]) // otherwise reverse and go slower
      }
    }
    //Stop trying too hard once successive improvements are negligible
    newfP = f0
    if (i > 1000 && newfP < lastfP && Math.abs((newfP - lastfP) / lastfP) < eps) break
    lastfP = newfP
  }
  return P0
};
function clearOldName() { //This is needed because otherwise you can't re-load the same file name!
Loading = true //block recalculation until the new file has actually been parsed
document.getElementById('Load').value = "" //a blank value means re-choosing the same file still fires 'change'
}
//'change' handler for the file input: read the chosen file as text and
//hand its contents to LoadData for CSV parsing.
function handleFileSelect(evt) {
  const chosen = evt.target.files[0];
  if (!chosen) return; //nothing selected - nothing to do
  const reader = new FileReader();
  reader.onload = function (loadEvt) {
    LoadData(loadEvt.target.result)
  };
  reader.readAsText(chosen);
}
//Load data from a chosen file: parse the CSV text with PapaParse.
//papaCompleteFn does the real work once parsing finishes.
function LoadData(S) {
  const parseConfig = {
    download: false,
    header: true,
    skipEmptyLines: true,
    complete: papaCompleteFn,
    error: papaErrorFn
  };
  Papa.parse(S, parseConfig)
}
//PapaParse error callback: just log it - the app carries on with the old data
function papaErrorFn(err, theFile) {
  console.log("Error:", err, theFile)
}
//PapaParse success callback: pull the "Data" rows out of the parsed CSV,
//auto-scale the amounts to roughly 0-1 for fitting (via the global divBy),
//then recalculate. Expects header columns Item, a, n.
function papaCompleteFn() {
var theData = arguments[0]
//Reset all fit state so the new file is fitted from scratch
fitData = []; scaleData = []; lastFit = "", newFit = true
if (theData.data.length < 3) return //Not a valid data file
let maxY = 0, i=0, theRow=0
for (i = 0; i < theData.data.length; i++) {
theRow = theData.data[i]
//NOTE(review): assumes every row has an Item field - a file missing that
//column would throw here; confirm the expected CSV format
if (theRow.Item.toUpperCase() == "DATA") {
//fitData is scaled below for fitting; scaleData keeps the raw values for plotting
fitData.push({ x: theRow.a, y: theRow.n })
scaleData.push({ x: theRow.a, y: theRow.n })
let y = parseFloat(theRow.n)
if (y > maxY) maxY = y
}
}
if (maxY == 0) return //Invalid data
divBy = 1
if (maxY > 1) { //The general fitting is easier if scaled from ~0-1
//Cascade: the LAST matching test wins, so divBy becomes the smallest power
//of 10 that is >= maxY (values above 10000 are left unscaled at divBy = 1)
if (maxY <= 10000) divBy = 10000
if (maxY <= 1000) divBy = 1000
if (maxY <= 100) divBy = 100
if (maxY <= 10) divBy = 10
for (i = 0; i < fitData.length; i++) {
fitData[i].y /= divBy //y arrives as a string from Papa; /= coerces it to a number
}
}
Loading = false
Main()
}
//Clipboard copy via a temporary <textarea> + execCommand, for browsers
//without the async Clipboard API. Best-effort: failures are silently ignored.
function fallbackCopyTextToClipboard(text) { //Necessary if async not in browser
  const textArea = document.createElement("textarea");
  textArea.value = text;
  document.body.appendChild(textArea);
  textArea.focus();
  textArea.select();
  //FIX: guard the scroll - the original called scrollIntoView() unconditionally,
  //which throws a TypeError if no #network element exists on the page
  const elmnt = document.getElementById("network");
  if (elmnt) elmnt.scrollIntoView();
  try {
    document.execCommand('copy'); //unused success flag removed
  } catch (err) {
    //Deliberate best-effort: a failed copy is not worth interrupting the user
  }
  document.body.removeChild(textArea);
}
//Copy text to the clipboard, preferring the async Clipboard API and dropping
//to the textarea fallback where it is unavailable or fails.
function copyTextToClipboard(text) {
if (!navigator.clipboard) {
fallbackCopyTextToClipboard(text)
return
}
navigator.clipboard.writeText(text).then(function () {
//Copied successfully - nothing more to do
}, function (err) { //Rejection was previously swallowed silently
console.error("Async clipboard copy failed, using fallback:", err)
fallbackCopyTextToClipboard(text)
})
}
A lot to talk about
All data are assumed to be in g/100g format. For those who want to load their own data and/or convert from previous fits such as GAB, please jump to the instructions below.
This could have been at least 3 separate apps, but in the end it's a single app because everything is inter-related. So take some time to go through the text here - there's a lot of good stuff to take in.
The adsorption (onto) and absorption (into) of molecules such as water onto/into surfaces [the sorbate molecules interact with the sorbent] can be experimentally measured, and the curve of activity (very often just concentration) versus amount ad/ab-sorbed is called the isotherm. These isotherms have distinctive shapes, often with plateaux but sometimes with inflexion points. There has been a long history of describing these isotherms with models such as Langmuir, BET, GAB etc. based on some plausible science of what is happening in, for example, monolayers and multilayers.
As happens so often in science, helpful abstractions based on simple, limiting cases gain the aura of providing fundamental insights into complex processes. So the various isotherm models have been used in cases where their fundamental assumptions simply cannot apply. At the same time, the complexities of real isotherms invite alternative models, with the official body IUPAC listing 80+ such models. The whole area is a mess because, as has often been shown1, it is possible to fit the same real-world isotherm data with very different isotherm models based on different assumptions.
Fortunately, by going back to the fundamentals, it's rather straightforward to produce an assumption free theory that allows us to understand all basic sorption processes.
We first look how the theory gives us insightful, assumption-free curves for any isotherm, with very little effort. We then see how some modest assumptions behind two classes of isotherms can give us fits where the parameters have deep meanings, more insightful than those which are tied to the numerous models based on inappropriate assumptions.
The Gs2 and G22/v curves
An isotherm is just a plot of 〈n2〉 (the brackets mean "ensemble average"), the amount sorbed per unit mass of sorbent, versus a2, the activity of the sorbate. The "2" means that this is the sorbate as opposed to "s" the surface. If, instead, we plot 〈n2〉/a2 versus a2, we end up with a plot that, when multiplied by a constant, gives us Gs2. This is a Kirkwood-Buff integral which tells us how much extra "2" we have on the surface compared to an average without sorption. Typically it starts off high, meaning that there's a strong attraction; then it reduces because there are fewer available sites; then it increases for interesting reasons discussed next.
If you take the derivative of the inverse curve, a2/〈n2〉 you directly obtain G22/v. This is a measure of how many sorbates you have together compared to the average, divided by the "interfacial volume" v which is discussed later. This typically starts off negative, you have fewer sorbates together than the average for the simple reason that if you have one sorbate somewhere you cannot have a second one in the same place; this is the "excluded volume" effect. Eventually you reach a point where G22/v becomes positive - this means that you have net sorbate-sorbate interactions. The point at which this happens is at exactly the point where Gs2 starts to increase. Now you have more surface-sorbate interactions because sorbate-sorbate interactions (which are themselves enabled by the surface) mean that any sorbate at the surface brings another (fraction of a) sorbate with it.
This molecular picture applies to all possible sorption curves and makes no mention of "monolayer coverage" or "sorbtion sites" or "micropores" and is derived straight from the isotherm itself. What's not to like?
Fitting to meaningful parameters
The problem with the 80+ isotherm models is that they are each based on (conflicting) assumptions, so their parameters (a) apply only to those assumptions and (b) cannot be compared and contrasted with others. See the review by Peleg5 for an independent view on this. What would be preferable would be a small set of models based on the assumption-free stat-therm, that applied to a broad range of cases, so within each type of case there would just be one set of meaningful parameters that everyone can use. For the purpose of this app only the ABC is used. The theory can be contracted to AB or expanded to ABCD but we achieve most of what we need with ABC. Those with obviously cooperative isotherms can explore them using the Cooperative Isotherms app.
The app works in two modes.
- You can play around with different isotherm models in reverse - adjusting ABC to get the parameters appropriate to your chosen model, and see how well (normal) or badly (sometimes) ABC maps onto the other model.
- You can load your own isotherm as a .csv file (described below) and obtain an ABC fit which you can then translate into values for other isotherms.
The ABC fit
Here we focus on a 3-parameter A, B, C fit. Although, like any fit, it contains approximations, it is sufficiently close to the assumption-free theory that the parameters have deep meanings that throw light on the true meaning of the parameters from assumption-filled isotherms such as BET and GAB.
The fit is based on the well-known fact that thermodynamic interactions can involve a "virial expansion". From this modest assumption, the following formula naturally evolves:
`"〈"n_2"〉"=(a_2)/(A-Ba_2-(C(a_2)^2)/2)`
As mentioned above, a fourth term could be added to the bottom `(-D(a_2)^3)/3` for more complex isotherms but we will focus on ABC.
What is striking about the ABC fit is that it can be readily mapped onto classic models such as BET or GAB, allowing the parameters from these popular fits to be re-interpreted without the flawed assumptions behind them.
The stat-therm meaning of A, B, C
The meanings of the parameters are unambiguous and assumption-free.
- A: This is the attraction of individual molecules to the sorbent. We know this because A has a big impact on the isotherm without affecting G22, the interactions between sorbate molecules. More specifically `1/A=c_2^(sat)G_(s2)^0` where `c_2^(sat)` is a constant discussed below along with the units and where the 0 means that this is the surface-sorbate Kirkwood-Buff integral at infinite dilution. It can be likened to what an AFM tip with a single sorbate molecule might find, on average, as it moved over the surface.
- B: This describes the sorbent-induced one-to-one sorbate interactions because `B=G_(22)^0/(v^0)`, and is typically negative because of excluded volume effects.
- C: This is a measure of 3-body interactions - a larger value means stronger sorbate-sorbate interactions on the surface.
BET surface area
Amazingly, the "BET monolayer coverage" is given by `n_m=-1/B=(v^0)/(G_(22)^0)` so has nothing to do with monolayers and instead is a measure of surface-induced sorbate-sorbate interactions. The BET constant is given by `C_B=-B/A`, in other words it says that the BET constant is a mix of G22/v and Gs2 both at infinite dilution.
In simple cases, G22 at infinite dilution is equal to the molar volume of the molecule. So v0/G220 (with units of mol/kg) is a number showing how many moles of virtual isolated molecules could fit on the surface of 1kg of sample if (as, of course, is not the case) they had the chance. If you multiply this by the area of the molecule (and by Avogadro's number) then you can get a surface area. Maybe a good name for it would be the Virtual Surface Area, VSA. To be consistent with the app, units should be m²/kg but instead the units are the generally-accepted m²/g. By choosing your Sorbate you get the MWt if conversion to moles is required, along with an Area in Ų.
An app for handling BET-style isotherms from the world of Inverse Gas Chromatography, including an extra term to account for "Langmuir sites" is available on the Practical Chromatography site.
Units
Because the ABC fit is based on stat-therm and is universal for the very common Type II isotherms, we can recast previous fits, with their different parameters into a like-for-like comparison via ABC. But for that we need to agree on a common set of units. The units of 〈n2〉 are quantity-sorbate/quantity-sorbent, often g/100g. The units of ABC are the inverse. In terms of mass of sorbent we could choose, g, 100g or kg. It seems best to choose kg as this is an SI unit. And although we could choose mass of sorbate, it seems more universal to use moles. So A, B, C in the app are converted from your choice of original units to the universal `(kg)/(mol)`.
In terms of stat-therm, key values are Gs20 and G220/v0, i.e. the values at infinite dilution. As mentioned above:
`1/A=c_2^(sat)G_(s2)^0`
This means that we need to know that `c_2^(sat)` is the saturated vapour concentration (mol/m³) which in turn is `p^(sat)/(RT)` the saturated vapour pressure over RT. You enter T and from your selected Sorbate the Antoine Coefficients ("A") are used to calculate `c_2^(sat)`, which is shown in the outputs. For water at room temperature, the value is ~1.3 mol/m³. This in turn means that the units of Gs2 are `(mol)/(kg)(m³)/(mol)=(m³)/(kg)`
Because G22/v is simply B then its units are the same `(kg)/(mol)`
Units when converting from other isotherms
If you know that your data all refer to g/100g values as used here, and if you put in, say, your GAB parameters then the conversion will be correct. However, if your GAB parameters were fitted to, say, mole/100g then the nm parameter will be out by a factor of 18 (CB and K are unaffected by units). For other isotherms it's up to you to convert your parameters into their g/100g equivalents. One of the many nice things about ABC is that we can all use the same values without having to bother about converting, say, Oswin parameter units to, say, Smith parameter units.
The Isotherms
In principle, this app could contain all 80+ isotherms that have been proposed and are, to a lesser or greater extent, favoured by different academic traditions. My view is that there is little to be gained from doing that. Instead, some typical, popular isotherms are chosen which capture different traditions such as exponential, logarithmic, polynomial, simple, complex. Here, in alphabetical order, are the isotherms used. For BET and GAB nm is used at the "monolayer coverage", though we now know this isn't the case, and CB is the BET constant. If there is a strong and consistent tradition of naming the parameters, something close to that tradition is used. Otherwise, neutral K1, K2... and, for power laws N are used:
- BET: `"〈"n"〉"=(n_mC_Ba)/((1-a)(1-a+C_Ba))`
- Bradley: `"〈"n"〉"=ln(ln(1/a)/B)/ln(A)`
- GAB: `"〈"n"〉"=(n_mC_BKa)/((1-Ka)(1-Ka+C_BKa))`
- Hailwood-Horrobin: `"〈"n"〉"=a/(A+Ba-Ca^2)`
- Halsey: `"〈"n"〉"=(-K/ln(a))^(1/n)`
- Henderson: `"〈"n"〉"=(ln(1-a)/(-A))^(1/B)`
- Oswin: `"〈"n"〉"=K_1(a/(1-a))^N`
- Peleg: `"〈"n"〉"=K_1a^(N_1)+K_2a^(N_2)`
- Smith: `"〈"n"〉"=K_1-K_2ln(1-a)`
Fitting your own data
A set of representative isotherms based on the different models is provided in Food-Isotherms.zip. The master file is in Excel and the individual worksheets show the parameters used. The individual isotherms are in .csv format with the first row Item, a, n and subsequent rows with Data, a_value, n_value. This slightly unfamiliar format reflects modern JavaScript file handling methods.
Prepare your own file in that format. The n values should be in g/100g which we chose as the most popular standard format.
You choose File A-B-C Fit, load your file and examine the quality of fit. The Fit Quality slider can be moved as a compromise between speed of fitting and quality of fit. Typically a value of 5 is suitable but you may need to increase it if your isotherm is highly atypical.
If you have a reasonable fit to, say, a GAB model, select that model from the combobox. The curve doesn't change (because you haven't altered A, B, C) but you get the GAB parameters calculated. You can then compare them to the values from the original spreadsheet.
Converting your old parameters
For systems such as GAB that have the same functional form as ABC, if you select them, the Model Parameters box becomes editable. If you type your own parameters in any reasonable format and click the -->ABC button then a full conversion to ABC is performed, allowing access to all the relevant parameters.
For others you have to play with ABC sliders till you get your parameters to appear in the box, at which point you have the relevant ABC parameters. Maybe in future versions this inconvenient approach will be updated.
Steven Abbott, Kaja Harton, Nobuyuki Matubayasi, Seishi Shimizu, What Are Our Food/Water Isotherms Really Telling Us? A New Way to Look at Old Data, in Soft Matter in Foods. Gillies, G. & Rousseau, D. (eds.). The Royal Society of Chemistry (2025). We thank the Editors for allowing us to release the app ahead of publication.
Seishi Shimizu and Nobuyuki Matubayasi, Sorption: A Statistical Thermodynamic Fluctuation Theory, Langmuir 2021, 37, 24, 7380–7391
Seishi Shimizu and Nobuyuki Matubayasi, Understanding Sorption Mechanisms Directly from Isotherms, Langmuir 2023, 39, 6113–6125
Seishi Shimizu and Nobuyuki Matubayasi, Surface Area Estimation: Replacing the BET Model with the Statistical Thermodynamic Fluctuation Theory, Langmuir 2022, 38, 7989–8002
M. Peleg, Models of Sigmoid Equilibrium Moisture Sorption Isotherms With and Without the Monolayer Hypothesis, Food Eng. Rev., 2020, 12, 1–13