How efficient is operator spread when copying JavaScript objects?

Question:

I recently saw a comment that discussed the performance of the "spread" operator (the `...` syntax) in JavaScript.

The question, essentially, is: if we want to copy an object and, for whatever reason, we can't use the methods JavaScript provides for this, how would we implement such a function?

This only makes sense if the values are manipulated in some way, such as when the copy has to be deep. If that's not the case, we should simply use:

const copy = { ...object };

Or:

const copy = Object.assign({}, object);

With that, the confusion would be, between the two following ways of copying an object, which would be the most efficient:

function copyObjectWithSpread(target) {
  // Shallow-copies `target` by rebuilding the accumulator with spread on
  // every step — each iteration copies all previously accumulated keys.
  return Object.keys(target).reduce(
    (accumulated, property) => ({ ...accumulated, [property]: target[property] }),
    {}
  );
}

Or:

function copyObjectWithAssignment(target) {
  // Shallow-copies `target` by assigning each entry onto a single
  // accumulator object — no intermediate objects are created.
  return Object.entries(target).reduce((accumulated, [property, value]) => {
    accumulated[property] = value;
    return accumulated;
  }, {});
}

Answer:

If we wanted to implement a good copy function, it's important to understand that in the first version:

{ ...memo, [key]: target[key] }

It's copying memo to this new temporary object on each iteration and throwing it away. While:

base[prop] = value

It's just an ordinary assignment.

Since this is in a loop in reduce, the accumulated result is copied over and over in every iteration.

In the first iteration the accumulator has 0 properties, so copying it costs nothing; but each subsequent iteration copies one more property than the previous one, for a total cost of roughly:

1 + 2 + 3 + 4 + ... + n = n?

Where n is the number of keys.

This sum is like an "additive factorial" and was called the "termial function" by Donald Knuth. It can also be written as the sum of terms in an arithmetic progression:

n? = ( n ^ 2 + n ) / 2

So the first version has quadratic complexity O(n^2) as a function of the number of keys. While the second has linear complexity O(n) as a function of the number of keys.

In practice, the first form (spread inside the loop) is slower even for a small number of keys, and the gap grows wider as the number of keys increases.

I wrote the following script to test the effects on my own computer:

// NOTE(review): `r` is never used anywhere in this script — presumably a
// leftover from editing; confirm before deleting.
let r;

// Small benchmark fixture: a fixed set of 8 keys holding heterogeneous
// value types.
const properlyTypedObjectWith8Keys = {
  key7: false,
  key1: "here",
  key2: "there",
  key3: "yey",
  key4: new String(),
  key5: 123,
  key6: /polymorphic/,
  key8: {}
};

// NOTE(review): `numKeys` counts loop *iterations*, not distinct keys — the
// generator below yields at most ~1,001 distinct names ("random-key0" ..
// "random-key1000"), so the object ends up with <= 1,001 properties, not
// 10 million as the page headings suggest. Confirm intent.
const numKeys = 10e6;
const propertyValue = "stuff here";
const nonProperlyTypedObjectWith8Keys = {};

// Populate the "big" fixture with randomly named keys (duplicates overwrite).
for (let i = 0; i < numKeys; i++) {
  nonProperlyTypedObjectWith8Keys[
    "random-key" + Math.round(Math.random() * 1000)
  ] = propertyValue;
}

function copyingMerge(o) {
  // Returns a reducer that allocates a fresh object on every call:
  // the accumulator is spread (fully copied) and then extended.
  return function run(base, prop) {
    const next = { ...base };
    next[prop] = o[prop];
    return next;
  };
}

function nonCopyingMerge(o) {
  // Returns a reducer that mutates the accumulator in place and hands the
  // same object back — a plain property assignment, no copying.
  return (base, prop) => {
    base[prop] = o[prop];
    return base;
  };
}

function runMerge(object, merge) {
  // Folds every own enumerable key of `object` through the reducer
  // produced by `merge(object)`, starting from an empty object.
  const step = merge(object);
  const keys = Object.keys(object);
  return keys.reduce(step, {});
}

function assert(bool) {
  // Minimal assertion helper: throws when the condition does not hold.
  if (bool) {
    return;
  }
  throw new Error("assertion failed");
}

function testCopyingMerge() {
  // Sanity check: the spread-based merge must yield a faithful shallow copy
  // of the small 8-key fixture.
  const copy = runMerge(properlyTypedObjectWith8Keys, copyingMerge);
  const copiedKeys = Object.keys(copy);

  assert(copiedKeys.length === 8);
  for (const key of copiedKeys) {
    assert(copy[key] === properlyTypedObjectWith8Keys[key]);
  }
}

function testNonCopyingMerge() {
  // Sanity check: the mutating merge must yield a faithful shallow copy
  // of the small 8-key fixture.
  const copy = runMerge(properlyTypedObjectWith8Keys, nonCopyingMerge);
  const copiedKeys = Object.keys(copy);

  assert(copiedKeys.length === 8);
  for (const key of copiedKeys) {
    assert(copy[key] === properlyTypedObjectWith8Keys[key]);
  }
}

function testCopyingMergeWithUntyped() {
  // Same faithfulness check, against the large randomly keyed fixture.
  const copy = runMerge(nonProperlyTypedObjectWith8Keys, copyingMerge);
  const copiedKeys = Object.keys(copy);
  const expectedLength = Object.keys(nonProperlyTypedObjectWith8Keys).length;

  assert(copiedKeys.length === expectedLength);
  for (const key of copiedKeys) {
    assert(copy[key] === nonProperlyTypedObjectWith8Keys[key]);
  }
}

function testNonCopyingMergeWithUntyped() {
  // Same faithfulness check with the mutating merge, against the large
  // randomly keyed fixture.
  const copy = runMerge(nonProperlyTypedObjectWith8Keys, nonCopyingMerge);
  const copiedKeys = Object.keys(copy);
  const expectedLength = Object.keys(nonProperlyTypedObjectWith8Keys).length;

  assert(copiedKeys.length === expectedLength);
  for (const key of copiedKeys) {
    assert(copy[key] === nonProperlyTypedObjectWith8Keys[key]);
  }
}

function runBenchmark(iterations, name, fn) {
  // Runs `fn` `iterations` times, then writes the mean duration per run
  // into the DOM element whose id matches `name` (browser-only).
  const start = Date.now();
  try {
    let i = 0;
    while (i < iterations) {
      fn();
      i += 1;
    }
  } catch (err) {
    // Surface which benchmark failed before propagating the error.
    console.error(name, "failed", err.stack);
    throw err;
  }
  const elapsed = Date.now() - start;

  const target = document.querySelector('#' + name);
  // Output text is Portuguese on purpose ("levou ... em média" = "took ...
  // on average"); kept byte-for-byte.
  target.innerText = `${name} levou ${(elapsed / iterations) + 'ms'} em média`;
}

// Small-object benchmarks: 8 keys, one million runs each.
runBenchmark(1e6, "copying-result", testCopyingMerge);
runBenchmark(1e6, "noncopying-result", testNonCopyingMerge);

// This is so slow that it can't run a million times.
runBenchmark(100, "copying-result-big", testCopyingMergeWithUntyped);
runBenchmark(100, "noncopying-result-big", testNonCopyingMergeWithUntyped);
/* Minimal styling for the benchmark result page. */
body {
  font-family: sans-serif;
}
<!-- Result placeholders: runBenchmark() writes the mean time into the <div>
     whose id matches the benchmark name. Headings are user-facing Portuguese
     text and are left as-is. -->
<h1>Com operador spread, 8 chaves</h1>
<div id="copying-result"></div>

<h1>Sem operador spread, 8 chaves</h1>
<div id="noncopying-result"></div>

<!-- NOTE(review): the headings claim "1 milhão de chaves", but the script's
     key generator produces at most ~1,001 distinct keys — confirm. -->
<h1>Com operador spread, 1 milhão de chaves</h1>
<div id="copying-result-big"></div>

<h1>Sem operador spread, 1 milhão de chaves</h1>
<div id="noncopying-result-big"></div>

It runs in your own browser.

With 8 keys there is a ~5x difference. With the large object (built with 10e6 insertions) there is a ~150x difference on my machine.

Whether or not this matters to your object construction use case in general depends. There are usually not so many problems with copying objects, even millions of copies should be reasonably cheap with few keys.

However, in the context of writing a good-quality copy function, there doesn't seem to be any reason to prefer a quadratic implementation over a linear one, given that both are equally readable.

Scroll to Top
AllEscort