Nate wrote:
You would use spine-ts for this. If you just want to show one skeleton, spine-player (uses spine-webgl) is easiest, as it handles the camera and so on. You can still manipulate the skeleton with spine-player. If you want to show multiple skeletons and it's just a matter of rendering a second skeleton in the background, you might still be able to use spine-player. Otherwise you can use spine-webgl (spine-canvas is more limited). spine-webgl (uses spine-ts) gives you the most features, but you need to handle setting up the camera and a few other things yourself. spine-webgl does the basics, but is not a game toolkit. If you want a game toolkit that provides easier ways to do rendering and build games, you can look at game toolkits that use the Spine Runtimes, like pixi.js or Phaser.
Playing animations and manipulating the skeletons (like by handling user input) is done via the Spine Runtimes API. See the runtimes user guide.
Why are you using such an old version of Spine? You should use 4.1 for any new projects; both the editor and runtimes are much improved.
Hello. Thank you for your reply.
I am only a contractor and have no influence on the version of Spine the customer uses, so I have to work with what I have.
The aim of the project is to create a full-fledged, dynamic demonstration that can show everything the animations are capable of. Since there will be a lot of animations in the long run, it is easier for me to use a solution I can fully control, and spine-player doesn't work for me in that respect.
And yes, as you noted, I will have to handle the camera myself, but the questions above still stand. The documentation partially answered them, but I could use some clarification. Here is the code I currently have:
let canvas;
let gl;
let shader;
let batcher;
let mvp = new spine.webgl.Matrix4();
let skeletonRenderer;
let assetManager;
let debugRenderer;
let shapes;
let lastFrameTime;
let skeletons = {};
let activeSkeleton = "Hoshino_home";
let swirlTime = 0;
function init() {
    // Set up the canvas and WebGL context. We pass alpha: false to canvas.getContext() so we don't use
    // premultiplied alpha when loading textures. That is handled separately by PolygonBatcher.
    canvas = document.getElementById("canvas");
    canvas.width = window.innerWidth;
    canvas.height = window.innerHeight;
    var config = { alpha: false };
    gl = canvas.getContext("webgl", config) || canvas.getContext("experimental-webgl", config);
    if (!gl) {
        alert('WebGL is unavailable.');
        return;
    }
    // Create a simple shader, polygon batcher, model-view-projection matrix, SkeletonRenderer, and AssetManager.
    shader = spine.webgl.Shader.newTwoColoredTextured(gl);
    batcher = new spine.webgl.PolygonBatcher(gl);
    mvp.ortho2d(0, 0, canvas.width - 1, canvas.height - 1);
    skeletonRenderer = new spine.webgl.SkeletonRenderer(gl);
    assetManager = new spine.webgl.AssetManager(gl);
    requestAnimationFrame(loadAssetL2D);
}
function loadAssetL2D() {
    assetManager.loadBinary("assets/Hoshino_home.skel");
    assetManager.loadTextureAtlas("assets/Hoshino_home.atlas");
}
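For the background skeleton I ask about below, I assume I would just queue its files here in the same way; bg.skel and bg.atlas are only placeholder names for the files I actually have:

    // Sketch: queue the background skeleton's files alongside the main one in loadAssetL2D().
    assetManager.loadBinary("assets/bg.skel");
    assetManager.loadTextureAtlas("assets/bg.atlas");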
function createL2D() {
    // Wait until the AssetManager has loaded all resources, then load the skeletons.
    if (assetManager.isLoadingComplete()) {
        skeletons["Hoshino_home"] = startL2d("Hoshino_home", "Idle_01");
        lastFrameTime = Date.now() / 1000;
        requestAnimationFrame(render); // Loading is done, call render every frame.
    } else {
        requestAnimationFrame(createL2D);
    }
}
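Once loading is complete, I assume the background would be created the same way next to the existing line in createL2D(); "bg" and its animation name are placeholders:

    // Sketch: create the background skeleton in addition to the main one.
    skeletons["bg"] = startL2d("bg", "Idle_01");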
function calculateSetupPoseBounds(skeleton) {
    skeleton.setToSetupPose();
    skeleton.updateWorldTransform();
    var offset = new spine.Vector2();
    var size = new spine.Vector2();
    skeleton.getBounds(offset, size, []);
    return { offset: offset, size: size };
}
function startL2d(name, initialAnimation) {
    // Load the texture atlas using name.atlas from the AssetManager.
    var atlas = assetManager.get("assets/" + name + ".atlas");
    // Create an AtlasAttachmentLoader that resolves region, mesh, bounding box, and path attachments.
    var atlasLoader = new spine.AtlasAttachmentLoader(atlas);
    // Create a SkeletonBinary instance for parsing the .skel file.
    var skeletonBinary = new spine.SkeletonBinary(atlasLoader);
    // Set the scale to apply during parsing, parse the file, and create a new skeleton.
    skeletonBinary.scale = 2;
    var skeletonData = skeletonBinary.readSkeletonData(assetManager.get("assets/" + name + ".skel"));
    var skeleton = new spine.Skeleton(skeletonData);
    var bounds = calculateSetupPoseBounds(skeleton);
    // Create an AnimationState and set the initial animation in looping mode.
    var animationStateData = new spine.AnimationStateData(skeleton.data);
    var animationState = new spine.AnimationState(animationStateData);
    if (name == "Aru_New_Year_home") {
        // animationStateData.setMix("Idle_01", "Idle_01", 0.4);
        // Play the start animation once, then queue the idle loop after it on the same track.
        animationState.setAnimation(0, "Start_Idle_01", false);
        animationState.addAnimation(0, "Idle_01", true, 0);
    } else {
        animationState.setAnimation(0, initialAnimation, true);
    }
    animationState.addListener({
        event: function(track, event) {
            console.log("Event on track " + track.trackIndex + ": " + JSON.stringify(event));
        }
    });
    // Pack everything up and return to caller.
    return { skeleton: skeleton, state: animationState, bounds: bounds };
}
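To check that I can actually read the coordinate I'm after, my plan was to look the bone up right after the skeleton is created in startL2d(); I'm assuming All_layer is exposed as a bone with exactly that name:

    // Sketch: verify the bone exists and inspect its world position (valid here because
    // calculateSetupPoseBounds() already called updateWorldTransform()).
    var allLayer = skeleton.findBone("All_layer"); // null if no bone has this name
    if (allLayer != null) console.log("All_layer at", allLayer.worldX, allLayer.worldY);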
function render() {
    var now = Date.now() / 1000;
    var delta = now - lastFrameTime;
    lastFrameTime = now;
    // Update the MVP matrix to adjust for canvas size changes.
    resize();
    gl.clearColor(0.3, 0.3, 0.3, 1);
    gl.clear(gl.COLOR_BUFFER_BIT);
    // Apply the animation state based on the delta time.
    var skeleton = skeletons[activeSkeleton].skeleton;
    var state = skeletons[activeSkeleton].state;
    state.update(delta);
    state.apply(skeleton);
    skeleton.updateWorldTransform();
    // Bind the shader and set the texture unit and model-view-projection matrix.
    shader.bind();
    shader.setUniformi(spine.webgl.Shader.SAMPLER, 0);
    shader.setUniform4x4f(spine.webgl.Shader.MVP_MATRIX, mvp.values);
    // Start the batch and tell the SkeletonRenderer to render the active skeleton.
    batcher.begin(shader);
    skeletonRenderer.draw(batcher, skeleton);
    batcher.end();
    shader.unbind();
    requestAnimationFrame(render);
}
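My rough idea for showing the background behind the main skeleton is to update both animation states and draw the background first in the same batch inside render(); this is only a sketch and assumes a "bg" entry exists in skeletons:

    // Sketch: update and render the background first so it appears behind the active skeleton.
    var bg = skeletons["bg"];
    bg.state.update(delta);
    bg.state.apply(bg.skeleton);
    bg.skeleton.updateWorldTransform();
    batcher.begin(shader);
    skeletonRenderer.draw(batcher, bg.skeleton);
    skeletonRenderer.draw(batcher, skeleton);
    batcher.end();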
function resize() {
    var w = canvas.clientWidth;
    var h = canvas.clientHeight;
    if (canvas.width != w || canvas.height != h) {
        canvas.width = w;
        canvas.height = h;
    }
    // Calculations to center the skeleton in the canvas.
    var bounds = skeletons[activeSkeleton].bounds;
    var centerX = bounds.offset.x + bounds.size.x / 2;
    var centerY = bounds.offset.y + bounds.size.y / 2;
    var scaleX = bounds.size.x / canvas.width;
    var scaleY = bounds.size.y / canvas.height;
    var scale = Math.max(scaleX, scaleY) * 2;
    if (scale < 1) scale = 1;
    var width = canvas.width * scale;
    var height = canvas.height * scale;
    mvp.ortho2d(centerX - width / 2, centerY - height / 2, width, height);
    gl.viewport(0, 0, canvas.width, canvas.height);
}
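For the camera question below, my current thinking is to replace the last mvp.ortho2d() call in resize() so the view is centered on the All_layer bone's world position instead of on the setup pose bounds, keeping the same width/height computed above; I'm not sure this is the intended approach:

    // Sketch: center the camera on a specific bone instead of the bounds center
    // (width and height are the same values computed earlier in resize()).
    var allLayer = skeletons[activeSkeleton].skeleton.findBone("All_layer");
    if (allLayer != null)
        mvp.ortho2d(allLayer.worldX - width / 2, allLayer.worldY - height / 2, width, height);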
init();

let btnStartL2D = document.querySelector('.btnStartL2D');
btnStartL2D.addEventListener('click', () => {
    loadAssetL2D();
    createL2D();
});
I partially modified the demo code from your Git repository.
What do I need to do, or in which direction should I dig, to get the camera alignment right?
All the animations in the project are linked in one way or another to the All_layer layer, which, as I understand it, carries the coordinate I need. But how can I read that coordinate and tie it to the drawing in the canvas, given that the canvas size is always different and depends on the bounds of the asset?
In what order should the additional bg.skel be loaded and displayed? All I get is an error when loading the data.