-
-
Notifications
You must be signed in to change notification settings - Fork 1.9k
Transform react #2577
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Transform react #2577
Changes from 1 commit
bbe3533
ed24765
fcc459d
7044a13
88b7b43
cf4c9c3
e84d4b9
b436d52
2a41f9e
965bcfb
6fae229
79295f1
5e9aa65
690eb95
a244cec
92bd5d2
dc6de2f
03956e1
f439e41
e47e6a9
aa30ad6
4c70826
7279b55
3e250df
cd08479
0414147
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -12,6 +12,7 @@ var helpers = require('@src/plot_api/helpers'); | |
var editTypes = require('@src/plot_api/edit_types'); | ||
var annotations = require('@src/components/annotations'); | ||
var images = require('@src/components/images'); | ||
var Registry = require('@src/registry'); | ||
|
||
var d3 = require('d3'); | ||
var createGraphDiv = require('../assets/create_graph_div'); | ||
|
@@ -1053,14 +1054,14 @@ describe('Test plot api', function() { | |
var mlcmax1 = 6; | ||
var mlcscl1 = 'greens'; | ||
|
||
function check(auto, msg) { | ||
function check(auto, autocolorscale, msg) { | ||
expect(gd.data[0].marker.cauto).toBe(auto, msg); | ||
expect(gd.data[0].marker.cmin).negateIf(auto).toBe(mcmin0); | ||
expect(gd._fullData[0].marker.autocolorscale).toBe(auto, msg); | ||
expect(gd._fullData[0].marker.autocolorscale).toBe(autocolorscale, msg); | ||
expect(gd.data[0].marker.colorscale).toEqual(auto ? autocscale : mcscl0); | ||
expect(gd.data[1].marker.line.cauto).toBe(auto, msg); | ||
expect(gd.data[1].marker.line.cmax).negateIf(auto).toBe(mlcmax1); | ||
expect(gd._fullData[1].marker.line.autocolorscale).toBe(auto, msg); | ||
expect(gd._fullData[1].marker.line.autocolorscale).toBe(autocolorscale, msg); | ||
expect(gd.data[1].marker.line.colorscale).toEqual(auto ? autocscale : mlcscl1); | ||
} | ||
|
||
|
@@ -1069,28 +1070,30 @@ describe('Test plot api', function() { | |
{y: [2, 1], mode: 'markers', marker: {line: {width: 2, color: [3, 4]}}} | ||
]) | ||
.then(function() { | ||
check(true, 'initial'); | ||
// autocolorscale is actually true after supplyDefaults, but during calc it's set | ||
// to false when we push the resulting colorscale back to the input container | ||
check(true, false, 'initial'); | ||
return Plotly.restyle(gd, {'marker.cmin': mcmin0, 'marker.colorscale': mcscl0}, null, [0]); | ||
}) | ||
.then(function() { | ||
return Plotly.restyle(gd, {'marker.line.cmax': mlcmax1, 'marker.line.colorscale': mlcscl1}, null, [1]); | ||
}) | ||
.then(function() { | ||
check(false, 'set min/max/scale'); | ||
check(false, false, 'set min/max/scale'); | ||
return Plotly.restyle(gd, {'marker.cauto': true, 'marker.autocolorscale': true}, null, [0]); | ||
}) | ||
.then(function() { | ||
return Plotly.restyle(gd, {'marker.line.cauto': true, 'marker.line.autocolorscale': true}, null, [1]); | ||
}) | ||
.then(function() { | ||
check(true, 'reset'); | ||
check(true, true, 'reset'); | ||
return Queue.undo(gd); | ||
}) | ||
.then(function() { | ||
return Queue.undo(gd); | ||
}) | ||
.then(function() { | ||
check(false, 'undo'); | ||
check(false, false, 'undo'); | ||
}) | ||
.catch(failTest) | ||
.then(done); | ||
|
@@ -3183,6 +3186,7 @@ describe('Test plot api', function() { | |
['cheater_smooth', require('@mocks/cheater_smooth.json')], | ||
['finance_style', require('@mocks/finance_style.json')], | ||
['geo_first', require('@mocks/geo_first.json')], | ||
['gl2d_heatmapgl', require('@mocks/gl2d_heatmapgl.json')], | ||
['gl2d_line_dash', require('@mocks/gl2d_line_dash.json')], | ||
['gl2d_parcoords_2', require('@mocks/gl2d_parcoords_2.json')], | ||
['gl2d_pointcloud-basic', require('@mocks/gl2d_pointcloud-basic.json')], | ||
|
@@ -3196,12 +3200,14 @@ describe('Test plot api', function() { | |
['range_selector_style', require('@mocks/range_selector_style.json')], | ||
['range_slider_multiple', require('@mocks/range_slider_multiple.json')], | ||
['sankey_energy', require('@mocks/sankey_energy.json')], | ||
['scattercarpet', require('@mocks/scattercarpet.json')], | ||
['splom_iris', require('@mocks/splom_iris.json')], | ||
['table_wrapped_birds', require('@mocks/table_wrapped_birds.json')], | ||
['ternary_fill', require('@mocks/ternary_fill.json')], | ||
['text_chart_arrays', require('@mocks/text_chart_arrays.json')], | ||
['transforms', require('@mocks/transforms.json')], | ||
['updatemenus', require('@mocks/updatemenus.json')], | ||
['violins', require('@mocks/violins.json')], | ||
['violin_side-by-side', require('@mocks/violin_side-by-side.json')], | ||
['world-cals', require('@mocks/world-cals.json')], | ||
['typed arrays', { | ||
data: [{ | ||
|
@@ -3211,86 +3217,127 @@ describe('Test plot api', function() { | |
}] | ||
]; | ||
|
||
mockList.forEach(function(mockSpec) { | ||
it('can redraw "' + mockSpec[0] + '" with no changes as a noop', function(done) { | ||
var mock = mockSpec[1]; | ||
var initialJson; | ||
|
||
function fullJson() { | ||
var out = JSON.parse(Plotly.Plots.graphJson({ | ||
data: gd._fullData, | ||
layout: gd._fullLayout | ||
})); | ||
|
||
// TODO: does it matter that ax.tick0/dtick/range and zmin/zmax | ||
// are often not regenerated without a calc step? | ||
// in as far as editor and others rely on _full, I think the | ||
// answer must be yes, but I'm not sure about within plotly.js | ||
[ | ||
'xaxis', 'xaxis2', 'xaxis3', 'xaxis4', 'xaxis5', | ||
'yaxis', 'yaxis2', 'yaxis3', 'yaxis4', | ||
'zaxis' | ||
].forEach(function(axName) { | ||
var ax = out.layout[axName]; | ||
// make sure we've included every trace type in this suite | ||
var typesTested = {}; | ||
var itemType; | ||
for(itemType in Registry.modules) { typesTested[itemType] = 0; } | ||
for(itemType in Registry.transformsRegistry) { typesTested[itemType] = 0; } | ||
|
||
// Only include scattermapbox locally, see below | ||
delete typesTested.scattermapbox; | ||
|
||
// Not being supported? This isn't part of the main bundle, and it's pretty broken, | ||
// but it gets registered and used by a couple of the gl2d tests. | ||
delete typesTested.contourgl; | ||
|
||
function _runReactMock(mockSpec, done) { | ||
var mock = mockSpec[1]; | ||
var initialJson; | ||
|
||
function fullJson() { | ||
var out = JSON.parse(Plotly.Plots.graphJson({ | ||
data: gd._fullData.map(function(trace) { return trace._fullInput; }), | ||
layout: gd._fullLayout | ||
})); | ||
|
||
// TODO: does it matter that ax.tick0/dtick/range and zmin/zmax | ||
// are often not regenerated without a calc step? | ||
// in as far as editor and others rely on _full, I think the | ||
// answer must be yes, but I'm not sure about within plotly.js | ||
[ | ||
'xaxis', 'xaxis2', 'xaxis3', 'xaxis4', 'xaxis5', | ||
'yaxis', 'yaxis2', 'yaxis3', 'yaxis4', | ||
'zaxis' | ||
].forEach(function(axName) { | ||
var ax = out.layout[axName]; | ||
if(ax) { | ||
delete ax.dtick; | ||
delete ax.tick0; | ||
|
||
// TODO this one I don't understand and can't reproduce | ||
// in the dashboard but it's needed here? | ||
delete ax.range; | ||
} | ||
if(out.layout.scene) { | ||
ax = out.layout.scene[axName]; | ||
if(ax) { | ||
delete ax.dtick; | ||
delete ax.tick0; | ||
|
||
// TODO this one I don't understand and can't reproduce | ||
// in the dashboard but it's needed here? | ||
// TODO: this is the only one now that uses '_input_' + key | ||
// as a hack to tell Plotly.react to ignore changes. | ||
// Can we kill this? | ||
delete ax.range; | ||
} | ||
if(out.layout.scene) { | ||
ax = out.layout.scene[axName]; | ||
if(ax) { | ||
delete ax.dtick; | ||
delete ax.tick0; | ||
// TODO: this is the only one now that uses '_input_' + key | ||
// as a hack to tell Plotly.react to ignore changes. | ||
// Can we kill this? | ||
delete ax.range; | ||
} | ||
} | ||
}); | ||
out.data.forEach(function(trace) { | ||
if(trace.type === 'contourcarpet') { | ||
delete trace.zmin; | ||
delete trace.zmax; | ||
} | ||
}); | ||
} | ||
}); | ||
out.data.forEach(function(trace) { | ||
if(trace.type === 'contourcarpet') { | ||
delete trace.zmin; | ||
delete trace.zmax; | ||
} | ||
}); | ||
|
||
return out; | ||
} | ||
|
||
return out; | ||
// Make sure we define `_length` in every trace *in supplyDefaults*. | ||
// This is only relevant for traces that *have* a 1D concept of length, | ||
// and in addition to simplifying calc/plot logic later on, this serves | ||
// as a signal to transforms about how they should operate. For traces | ||
// that do NOT have a 1D length, `_length` should be `null`. | ||
var mockGD = Lib.extendDeep({}, mock); | ||
supplyAllDefaults(mockGD); | ||
expect(mockGD._fullData.length).not.toBeLessThan((mock.data || []).length, mockSpec[0]); | ||
mockGD._fullData.forEach(function(trace, i) { | ||
var len = trace._length; | ||
if(trace.visible !== false && len !== null) { | ||
expect(typeof len).toBe('number', mockSpec[0] + ' trace ' + i + ': type=' + trace.type); | ||
} | ||
|
||
// Make sure we define `_length` in every trace *in supplyDefaults*. | ||
// This is only relevant for traces that *have* a 1D concept of length, | ||
// and in addition to simplifying calc/plot logic later on, this serves | ||
// as a signal to transforms about how they should operate. For traces | ||
// that do NOT have a 1D length, `_length` should be `null`. | ||
var mockGD = Lib.extendDeep({}, mock); | ||
supplyAllDefaults(mockGD); | ||
expect(mockGD._fullData.length).not.toBeLessThan((mock.data || []).length, mockSpec[0]); | ||
mockGD._fullData.forEach(function(trace, i) { | ||
var len = trace._length; | ||
if(trace.visible !== false && len !== null) { | ||
expect(typeof len).toBe('number', mockSpec[0] + ' trace ' + i + ': type=' + trace.type); | ||
} | ||
}); | ||
typesTested[trace.type]++; | ||
|
||
Plotly.newPlot(gd, mock) | ||
.then(countPlots) | ||
.then(function() { | ||
initialJson = fullJson(); | ||
|
||
return Plotly.react(gd, mock); | ||
}) | ||
.then(function() { | ||
expect(fullJson()).toEqual(initialJson); | ||
countCalls({}); | ||
}) | ||
.catch(failTest) | ||
.then(done); | ||
if(trace.transforms) { | ||
trace.transforms.forEach(function(transform) { | ||
typesTested[transform.type]++; | ||
}); | ||
} | ||
}); | ||
|
||
Plotly.newPlot(gd, mock) | ||
.then(countPlots) | ||
.then(function() { | ||
initialJson = fullJson(); | ||
|
||
return Plotly.react(gd, mock); | ||
}) | ||
.then(function() { | ||
expect(fullJson()).toEqual(initialJson); | ||
countCalls({}); | ||
}) | ||
.catch(failTest) | ||
.then(done); | ||
} | ||
|
||
mockList.forEach(function(mockSpec) { | ||
it('can redraw "' + mockSpec[0] + '" with no changes as a noop', function(done) { | ||
_runReactMock(mockSpec, done); | ||
}); | ||
}); | ||
|
||
it('@noCI can redraw scattermapbox with no changes as a noop', function(done) { | ||
typesTested.scattermapbox = 0; | ||
|
||
Plotly.setPlotConfig({ | ||
mapboxAccessToken: require('@build/credentials.json').MAPBOX_ACCESS_TOKEN | ||
}); | ||
|
||
_runReactMock(['scattermapbox', require('@mocks/mapbox_bubbles-text.json')], done); | ||
}); | ||
|
||
it('tested every trace & transform type at least once', function() { | ||
for(var itemType in typesTested) { | ||
expect(typesTested[itemType]).toBeGreaterThan(0, itemType + ' was not tested'); | ||
} | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. So when a new trace or transform type is added we'll be forced to add it to this suite as well. (note above I excluded contourgl and scattermapbox) I couldn't think of a good way to ensure that every component gets tested as well... in fact looking over the list now I don't see shapes or 3D annotations, I'll have to add those. If anyone has ideas how to enforce this please chime in! There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
🥇
There's probably a way using There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. OK - still sounds a bit finicky... perhaps a later iteration we could add a test that ensures we've covered every attribute anywhere in the schema - that would be a significantly more stringent coverage test for traces and transforms as well! But if you don't mind I'll defer this for now. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
No problem 👍 |
||
}); | ||
}); | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Is there anything we can think of that could be asserted about the cartesian product here? I.e. given
M1
and M2
is there anything in the JSON representation that we want to try to assert about M1.reactWith(M2).reactWith(M1)
? There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There are likely some things that are desirable that are already true in all cases and might be worth locking down, and some things that are desirable that are true in all but a small subset of cases and could be fixed easily (low-hanging fruits). There are also probably a large number of desirable things that aren't true and that are hard to fix, which I'm less interested in in the short run.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I tested a few of those transitions in fcc459d#diff-5a140ce1cc87a420496039bbf8d699f1R3114 - and in fact after this PR (knock wood) I kind of don't think there will be a lot of nasty issues uncovered by testing this cartesian product. It's also going to be a very long test if you want to cover every combination, so it sounds to me more like we'd want to test them all once, offline, find the subset that fail, and incorporate just those into a new test.