Bug 1613483 - Add all Browsertime tests with visual metrics to Fenix repo. (#9087)
* Add visual-metrics docker type. * Add required browsertime toolchain fetches. * Add browsertime tests for technical and visual metrics. * Run browsertime tests in a cron task. * Run visual metrics on all browsertime tests. * Use spaces instead of tabs, and resolve visual-metric nits. * Enable browsertime on pull request for testing. * Restrict PR tests to amazon on browsertime. * First attempt using multi_dep. * Add a primary dependency to browsertime. * Try by not popping. * Debug prints. * Make one grouping per browsertime task. * Try without the multi_dep transform. * Delete dependent-tasks in visual-metrics transformer. * Update setuptools installed and copy run-on-tasks-for. * Use get when getting run-on-tasks-for. * Add new pinned requirements. * Try it. * Set run-on-tasks-for properly. * Remove print statement. * Remove single_dep loader, and print statements. * Remove run-on-tasks-for testing setting. * Restart testing, and set user to root in visual-metrics Docker. * Remove testing settings. * Remove fetch-content from Docker. * Change attributes grouping method. * Run all tests as a check. * Undo testing changes, and fix a bad test name.master
parent
0d974fe262
commit
a457755388
|
@ -24,3 +24,9 @@ jobs:
|
||||||
treeherder-symbol: raptor-D
|
treeherder-symbol: raptor-D
|
||||||
target-tasks-method: raptor
|
target-tasks-method: raptor
|
||||||
when: [{hour: 1, minute: 0}]
|
when: [{hour: 1, minute: 0}]
|
||||||
|
- name: browsertime
|
||||||
|
job:
|
||||||
|
type: decision-task
|
||||||
|
treeherder-symbol: btime-D
|
||||||
|
target-tasks-method: browsertime
|
||||||
|
when: [{hour: 1, minute: 0}]
|
||||||
|
|
|
@ -0,0 +1,231 @@
|
||||||
|
---
|
||||||
|
loader: taskgraph.loader.transform:loader
|
||||||
|
transforms:
|
||||||
|
- fenix_taskgraph.transforms.browsertime:transforms
|
||||||
|
- fenix_taskgraph.transforms.notify:transforms
|
||||||
|
- taskgraph.transforms.job:transforms
|
||||||
|
- taskgraph.transforms.task:transforms
|
||||||
|
|
||||||
|
kind-dependencies:
|
||||||
|
- signing
|
||||||
|
- toolchain
|
||||||
|
|
||||||
|
primary-dependency: signing
|
||||||
|
|
||||||
|
only-for-build-types:
|
||||||
|
- performance-test
|
||||||
|
|
||||||
|
only-for-abis:
|
||||||
|
- armeabi-v7a
|
||||||
|
- arm64-v8a
|
||||||
|
|
||||||
|
job-defaults:
|
||||||
|
dependencies:
|
||||||
|
geckoview-nightly: geckoview-nightly
|
||||||
|
notify:
|
||||||
|
by-level:
|
||||||
|
'3':
|
||||||
|
email:
|
||||||
|
content: This calls for an action of the Performance team. Use the link to view it on Treeherder.
|
||||||
|
link:
|
||||||
|
text: Treeherder Job
|
||||||
|
href: 'https://treeherder.mozilla.org/#/jobs?repo={product_name}&revision={head_rev}&searchStr={task_name}'
|
||||||
|
on-reasons: [failed]
|
||||||
|
subject: '[{product_name}] Raptor-Browsertime job "{task_name}" failed'
|
||||||
|
to-addresses: [perftest-alerts@mozilla.com]
|
||||||
|
default: {}
|
||||||
|
run-on-tasks-for: []
|
||||||
|
treeherder:
|
||||||
|
kind: test
|
||||||
|
tier: 2
|
||||||
|
platform:
|
||||||
|
by-abi:
|
||||||
|
arm64-v8a: android-hw-p2-8-0-android-aarch64/opt
|
||||||
|
armeabi-v7a: android-hw-g5-7-0-arm7-api-16/opt
|
||||||
|
worker-type:
|
||||||
|
by-abi:
|
||||||
|
armeabi-v7a: t-bitbar-gw-perf-g5
|
||||||
|
arm64-v8a: t-bitbar-gw-perf-p2
|
||||||
|
worker:
|
||||||
|
max-run-time: 3600
|
||||||
|
env:
|
||||||
|
GECKO_HEAD_REPOSITORY: "https://hg.mozilla.org/mozilla-central"
|
||||||
|
MOZ_AUTOMATION: "1"
|
||||||
|
MOZ_HIDE_RESULTS_TABLE: "1"
|
||||||
|
MOZ_NO_REMOTE: "1"
|
||||||
|
MOZ_NODE_PATH: "/usr/local/bin/node"
|
||||||
|
MOZHARNESS_CONFIG: "raptor/android_hw_config.py"
|
||||||
|
MOZHARNESS_SCRIPT: "raptor_script.py"
|
||||||
|
NEED_XVFB: "false"
|
||||||
|
NO_FAIL_ON_TEST_ERRORS: "1"
|
||||||
|
XPCOM_DEBUG_BREAK: "warn"
|
||||||
|
artifacts:
|
||||||
|
- name: public/logs/
|
||||||
|
path: workspace/logs
|
||||||
|
type: directory
|
||||||
|
|
||||||
|
- name: public/test_info/
|
||||||
|
path: workspace/build/blobber_upload_dir
|
||||||
|
type: directory
|
||||||
|
run-visual-metrics: True
|
||||||
|
run:
|
||||||
|
using: run-task
|
||||||
|
checkout: false
|
||||||
|
run-as-root: true
|
||||||
|
command:
|
||||||
|
- 'bash'
|
||||||
|
- './test-linux.sh'
|
||||||
|
- '--cfg=mozharness/configs/raptor/android_hw_config.py'
|
||||||
|
- '--app=fenix'
|
||||||
|
- '--browsertime'
|
||||||
|
- '--cold'
|
||||||
|
- '--binary=org.mozilla.fenix.performancetest'
|
||||||
|
- '--activity=org.mozilla.fenix.browser.BrowserPerformanceTestActivity'
|
||||||
|
- '--download-symbols=ondemand'
|
||||||
|
- '--browsertime-node=$MOZ_FETCHES_DIR/node/bin/node'
|
||||||
|
- '--browsertime-geckodriver=$MOZ_FETCHES_DIR/geckodriver'
|
||||||
|
- '--browsertime-ffmpeg=$MOZ_FETCHES_DIR/ffmpeg-4.1.4-i686-static/bin/ffmpeg'
|
||||||
|
- '--browsertime-browsertimejs=$MOZ_FETCHES_DIR/browsertime/node_modules/browsertime/bin/browsertime.js'
|
||||||
|
fetches:
|
||||||
|
toolchain:
|
||||||
|
- browsertime
|
||||||
|
- linux64-ffmpeg-4.1.4
|
||||||
|
- linux64-geckodriver
|
||||||
|
- linux64-minidump-stackwalk
|
||||||
|
- linux64-node
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
tp6m-1-cold:
|
||||||
|
test-name: amazon
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-1-c)'
|
||||||
|
|
||||||
|
tp6m-2-cold:
|
||||||
|
test-name: google
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-2-c)'
|
||||||
|
|
||||||
|
tp6m-3-cold:
|
||||||
|
test-name: instagram
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-3-c)'
|
||||||
|
|
||||||
|
tp6m-4-cold:
|
||||||
|
test-name: bing-search-restaurants
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-4-c)'
|
||||||
|
|
||||||
|
tp6m-5-cold:
|
||||||
|
test-name: ebay-kleinanzeigen-search
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-5-c)'
|
||||||
|
|
||||||
|
tp6m-6-cold:
|
||||||
|
test-name: amazon-search
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-6-c)'
|
||||||
|
|
||||||
|
tp6m-7-cold:
|
||||||
|
test-name: wikipedia
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-7-c)'
|
||||||
|
|
||||||
|
tp6m-8-cold:
|
||||||
|
test-name: booking
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-8-c)'
|
||||||
|
|
||||||
|
tp6m-9-cold:
|
||||||
|
test-name: cnn-ampstories
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-9-c)'
|
||||||
|
|
||||||
|
tp6m-10-cold:
|
||||||
|
test-name: bbc
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-10-c)'
|
||||||
|
|
||||||
|
tp6m-11-cold:
|
||||||
|
test-name: microsoft-support
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-11-c)'
|
||||||
|
|
||||||
|
tp6m-12-cold:
|
||||||
|
test-name: imdb
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-12-c)'
|
||||||
|
|
||||||
|
tp6m-13-cold:
|
||||||
|
test-name: espn
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-13-c)'
|
||||||
|
|
||||||
|
tp6m-14-cold:
|
||||||
|
test-name: facebook-cristiano
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-14-c)'
|
||||||
|
|
||||||
|
tp6m-15-cold:
|
||||||
|
test-name: facebook
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-15-c)'
|
||||||
|
|
||||||
|
tp6m-16-cold:
|
||||||
|
test-name: youtube
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-16-c)'
|
||||||
|
|
||||||
|
tp6m-17-cold:
|
||||||
|
test-name: bing
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-17-c)'
|
||||||
|
|
||||||
|
tp6m-18-cold:
|
||||||
|
test-name: ebay-kleinanzeigen
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-18-c)'
|
||||||
|
|
||||||
|
tp6m-19-cold:
|
||||||
|
test-name: google-maps
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-19-c)'
|
||||||
|
|
||||||
|
tp6m-20-cold:
|
||||||
|
test-name: youtube-watch
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-20-c)'
|
||||||
|
|
||||||
|
tp6m-21-cold:
|
||||||
|
test-name: reddit
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-21-c)'
|
||||||
|
|
||||||
|
tp6m-22-cold:
|
||||||
|
test-name: stackoverflow
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-22-c)'
|
||||||
|
|
||||||
|
tp6m-23-cold:
|
||||||
|
test-name: jianshu
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-23-c)'
|
||||||
|
|
||||||
|
tp6m-24-cold:
|
||||||
|
test-name: allrecipes
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-24-c)'
|
||||||
|
|
||||||
|
tp6m-25-cold:
|
||||||
|
test-name: web-de
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-25-c)'
|
||||||
|
|
||||||
|
tp6m-26-cold:
|
||||||
|
test-name: aframe
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-26-c)'
|
||||||
|
|
||||||
|
tp6m-27-cold:
|
||||||
|
test-name: cnn
|
||||||
|
treeherder:
|
||||||
|
symbol: 'Btime(tp6m-27-c)'
|
|
@ -13,6 +13,7 @@ treeherder:
|
||||||
'productionFennec': 'Production-related tasks with same APK configuration as Fennec'
|
'productionFennec': 'Production-related tasks with same APK configuration as Fennec'
|
||||||
'Rap': 'Raptor tests'
|
'Rap': 'Raptor tests'
|
||||||
'Rap-P': 'Raptor power tests'
|
'Rap-P': 'Raptor power tests'
|
||||||
|
'Btime': 'Raptor-Browsertime tests'
|
||||||
|
|
||||||
task-priority: highest
|
task-priority: highest
|
||||||
|
|
||||||
|
|
|
@ -19,3 +19,6 @@ jobs:
|
||||||
ui-tests:
|
ui-tests:
|
||||||
parent: base
|
parent: base
|
||||||
symbol: I(ui-tests)
|
symbol: I(ui-tests)
|
||||||
|
visual-metrics:
|
||||||
|
parent: base
|
||||||
|
symbol: I(visual-metrics)
|
||||||
|
|
|
@ -10,6 +10,27 @@ job-defaults:
|
||||||
using: index-search
|
using: index-search
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
browsertime:
|
||||||
|
description: "Browsertime toolchain"
|
||||||
|
attributes:
|
||||||
|
toolchain-artifact: public/build/browsertime.tar.bz2
|
||||||
|
run:
|
||||||
|
index-search:
|
||||||
|
- gecko.cache.level-3.toolchains.v3.browsertime.latest
|
||||||
|
linux64-ffmpeg-4.1.4:
|
||||||
|
description: "FFMPEG fetch"
|
||||||
|
attributes:
|
||||||
|
toolchain-artifact: 'public/ffmpeg-4.1.4-i686-static.tar.xz'
|
||||||
|
run:
|
||||||
|
index-search:
|
||||||
|
- gecko.cache.level-3.content.v1.linux64-ffmpeg-4.1.4.latest
|
||||||
|
linux64-geckodriver:
|
||||||
|
description: "Geckodriver toolchain"
|
||||||
|
attributes:
|
||||||
|
toolchain-artifact: public/build/geckodriver.tar.xz
|
||||||
|
run:
|
||||||
|
index-search:
|
||||||
|
- gecko.cache.level-3.toolchains.v3.linux64-geckodriver.latest
|
||||||
linux64-minidump-stackwalk:
|
linux64-minidump-stackwalk:
|
||||||
description: "minidump_stackwalk toolchain"
|
description: "minidump_stackwalk toolchain"
|
||||||
attributes:
|
attributes:
|
||||||
|
@ -17,3 +38,17 @@ jobs:
|
||||||
run:
|
run:
|
||||||
index-search:
|
index-search:
|
||||||
- gecko.cache.level-3.toolchains.v3.linux64-minidump-stackwalk.latest
|
- gecko.cache.level-3.toolchains.v3.linux64-minidump-stackwalk.latest
|
||||||
|
linux64-node:
|
||||||
|
description: "Node.js toolchain"
|
||||||
|
attributes:
|
||||||
|
toolchain-artifact: public/build/node.tar.xz
|
||||||
|
run:
|
||||||
|
index-search:
|
||||||
|
- gecko.cache.level-3.toolchains.v3.linux64-node-10.latest
|
||||||
|
visual-metrics:
|
||||||
|
description: "Browsertime visual metrics analysis script"
|
||||||
|
attributes:
|
||||||
|
toolchain-artifact: public/visualmetrics.py
|
||||||
|
run:
|
||||||
|
index-search:
|
||||||
|
- gecko.cache.level-3.content.v1.visual-metrics.latest
|
||||||
|
|
|
@ -0,0 +1,49 @@
|
||||||
|
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||||
|
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||||
|
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||||
|
---
|
||||||
|
loader: fenix_taskgraph.loader.multi_dep:loader
|
||||||
|
|
||||||
|
kind-dependencies:
|
||||||
|
- browsertime
|
||||||
|
- toolchain
|
||||||
|
|
||||||
|
primary-dependency:
|
||||||
|
- browsertime
|
||||||
|
|
||||||
|
group-by: attributes
|
||||||
|
|
||||||
|
only-for-attributes:
|
||||||
|
- run-visual-metrics
|
||||||
|
|
||||||
|
transforms:
|
||||||
|
- fenix_taskgraph.transforms.visual_metrics:transforms
|
||||||
|
- taskgraph.transforms.job:transforms
|
||||||
|
- taskgraph.transforms.task:transforms
|
||||||
|
|
||||||
|
job-template:
|
||||||
|
description: "Run visual metrics calculations on Raptor"
|
||||||
|
run-on-projects: []
|
||||||
|
run-on-tasks-for: []
|
||||||
|
worker-type: b-android
|
||||||
|
treeherder:
|
||||||
|
tier: 3
|
||||||
|
kind: other
|
||||||
|
worker:
|
||||||
|
docker-image: {in-tree: visual-metrics}
|
||||||
|
max-run-time: 900
|
||||||
|
artifacts:
|
||||||
|
- type: file
|
||||||
|
name: public/perfherder-data.json
|
||||||
|
path: /builds/worker/artifacts/perfherder-data.json
|
||||||
|
- type: file
|
||||||
|
name: public/summary.json
|
||||||
|
path: /builds/worker/artifacts/summary.json
|
||||||
|
fetches:
|
||||||
|
toolchain:
|
||||||
|
- visual-metrics
|
||||||
|
run:
|
||||||
|
using: run-task
|
||||||
|
command: /builds/worker/bin/run-visual-metrics.py -- --orange --perceptual --contentful --force --renderignore 5 --json --viewport
|
||||||
|
checkout: false
|
||||||
|
run-as-root: true
|
|
@ -0,0 +1,33 @@
|
||||||
|
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT

# MAINTAINER is deprecated in the Dockerfile reference; use an OCI label.
LABEL maintainer="Gregory Mierzwinski <gmierzwinski@mozilla.com>"

# run-task expects to run as root
USER root

# Dependencies of visualmetrics.py: video decoding (ffmpeg), frame
# comparison (imagemagick, pyssim, python-pil), and both Python runtimes
# (visualmetrics.py runs under python2, the wrapper under python3).
RUN apt-get update -qq && \
    apt-get install -y \
        ffmpeg \
        imagemagick \
        pyssim \
        python \
        python-pil \
        python3 \
        python3-pip

WORKDIR /builds/worker

USER worker:worker

# Install pinned requirements as the unprivileged worker user, then drop
# the requirements file so it does not end up in the final image layer.
COPY requirements.txt /builds/worker/requirements.txt
RUN pip3 install setuptools==46.0.0
RUN pip3 install --require-hashes -r /builds/worker/requirements.txt && \
    rm /builds/worker/requirements.txt

COPY run-visual-metrics.py /builds/worker/bin/run-visual-metrics.py
COPY performance-artifact-schema.json /builds/worker/performance-artifact-schema.json

# chmod needs root; the task itself also runs as root (run-as-root: true).
USER root
RUN chmod +x /builds/worker/bin/run-visual-metrics.py

VOLUME /builds/worker/artifacts/
|
|
@ -0,0 +1,229 @@
|
||||||
|
{
|
||||||
|
"definitions": {
|
||||||
|
"application_schema": {
|
||||||
|
"properties": {
|
||||||
|
"name": {
|
||||||
|
"title": "Application under performance test",
|
||||||
|
"enum": [
|
||||||
|
"firefox",
|
||||||
|
"chrome",
|
||||||
|
"chromium",
|
||||||
|
"fennec",
|
||||||
|
"geckoview",
|
||||||
|
"refbrow",
|
||||||
|
"fenix"
|
||||||
|
],
|
||||||
|
"maxLength": 10,
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"version": {
|
||||||
|
"title": "Application's version",
|
||||||
|
"maxLength": 40,
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["name"],
|
||||||
|
"type": "object"
|
||||||
|
},
|
||||||
|
"framework_schema": {
|
||||||
|
"properties": {
|
||||||
|
"name": {
|
||||||
|
"title": "Framework name",
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"type": "object"
|
||||||
|
},
|
||||||
|
"subtest_schema": {
|
||||||
|
"properties": {
|
||||||
|
"name": {
|
||||||
|
"title": "Subtest name",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"publicName": {
|
||||||
|
"title": "Public subtest name",
|
||||||
|
"description": "Allows renaming test's name, without breaking existing performance data series",
|
||||||
|
"maxLength": 30,
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"value": {
|
||||||
|
"description": "Summary value for subtest",
|
||||||
|
"title": "Subtest value",
|
||||||
|
"type": "number",
|
||||||
|
"minimum": -1000000000000.0,
|
||||||
|
"maximum": 1000000000000.0
|
||||||
|
},
|
||||||
|
"unit": {
|
||||||
|
"title": "Measurement unit",
|
||||||
|
"type": "string",
|
||||||
|
"minLength": 1,
|
||||||
|
"maxLength": 20
|
||||||
|
},
|
||||||
|
"lowerIsBetter": {
|
||||||
|
"description": "Whether lower values are better for subtest",
|
||||||
|
"title": "Lower is better",
|
||||||
|
"type": "boolean"
|
||||||
|
},
|
||||||
|
"shouldAlert": {
|
||||||
|
"description": "Whether we should alert",
|
||||||
|
"title": "Should alert",
|
||||||
|
"type": "boolean"
|
||||||
|
},
|
||||||
|
"alertThreshold": {
|
||||||
|
"description": "% change threshold before alerting",
|
||||||
|
"title": "Alert threshold",
|
||||||
|
"type": "number",
|
||||||
|
"minimum": 0.0,
|
||||||
|
"maximum": 1000.0
|
||||||
|
},
|
||||||
|
"minBackWindow": {
|
||||||
|
"description": "Minimum back window to use for alerting",
|
||||||
|
"title": "Minimum back window",
|
||||||
|
"type": "number",
|
||||||
|
"minimum": 1,
|
||||||
|
"maximum": 255
|
||||||
|
},
|
||||||
|
"maxBackWindow": {
|
||||||
|
"description": "Maximum back window to use for alerting",
|
||||||
|
"title": "Maximum back window",
|
||||||
|
"type": "number",
|
||||||
|
"minimum": 1,
|
||||||
|
"maximum": 255
|
||||||
|
},
|
||||||
|
"foreWindow": {
|
||||||
|
"description": "Fore window to use for alerting",
|
||||||
|
"title": "Fore window",
|
||||||
|
"type": "number",
|
||||||
|
"minimum": 1,
|
||||||
|
"maximum": 255
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": [
|
||||||
|
"name",
|
||||||
|
"value"
|
||||||
|
],
|
||||||
|
"type": "object"
|
||||||
|
},
|
||||||
|
"suite_schema": {
|
||||||
|
"properties": {
|
||||||
|
"name": {
|
||||||
|
"title": "Suite name",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"publicName": {
|
||||||
|
"title": "Public suite name",
|
||||||
|
"description": "Allows renaming suite's name, without breaking existing performance data series",
|
||||||
|
"maxLength": 30,
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"tags": {
|
||||||
|
"type": "array",
|
||||||
|
"title": "Free form tags, which ease the grouping & searching of performance tests",
|
||||||
|
"description": "Similar to extraOptions, except it does not break existing performance data series",
|
||||||
|
"items": {
|
||||||
|
"type": "string",
|
||||||
|
"pattern": "^[a-zA-Z0-9]{1,24}$"
|
||||||
|
},
|
||||||
|
"uniqueItems": true,
|
||||||
|
"maxItems": 14
|
||||||
|
},
|
||||||
|
"extraOptions": {
|
||||||
|
"type": "array",
|
||||||
|
"title": "Extra options used in running suite",
|
||||||
|
"items": {
|
||||||
|
"type": "string",
|
||||||
|
"maxLength": 100
|
||||||
|
},
|
||||||
|
"uniqueItems": true,
|
||||||
|
"maxItems": 8
|
||||||
|
},
|
||||||
|
"subtests": {
|
||||||
|
"items": {
|
||||||
|
"$ref": "#/definitions/subtest_schema"
|
||||||
|
},
|
||||||
|
"title": "Subtests",
|
||||||
|
"type": "array"
|
||||||
|
},
|
||||||
|
"value": {
|
||||||
|
"title": "Suite value",
|
||||||
|
"type": "number",
|
||||||
|
"minimum": -1000000000000.0,
|
||||||
|
"maximum": 1000000000000.0
|
||||||
|
},
|
||||||
|
"unit": {
|
||||||
|
"title": "Measurement unit",
|
||||||
|
"type": "string",
|
||||||
|
"minLength": 1,
|
||||||
|
"maxLength": 20
|
||||||
|
},
|
||||||
|
"lowerIsBetter": {
|
||||||
|
"description": "Whether lower values are better for suite",
|
||||||
|
"title": "Lower is better",
|
||||||
|
"type": "boolean"
|
||||||
|
},
|
||||||
|
"shouldAlert": {
|
||||||
|
"description": "Whether we should alert on this suite (overrides default behaviour)",
|
||||||
|
"title": "Should alert",
|
||||||
|
"type": "boolean"
|
||||||
|
},
|
||||||
|
"alertThreshold": {
|
||||||
|
"description": "% change threshold before alerting",
|
||||||
|
"title": "Alert threshold",
|
||||||
|
"type": "number",
|
||||||
|
"minimum": 0.0,
|
||||||
|
"maximum": 1000.0
|
||||||
|
},
|
||||||
|
"minBackWindow": {
|
||||||
|
"description": "Minimum back window to use for alerting",
|
||||||
|
"title": "Minimum back window",
|
||||||
|
"type": "integer",
|
||||||
|
"minimum": 1,
|
||||||
|
"maximum": 255
|
||||||
|
},
|
||||||
|
"maxBackWindow": {
|
||||||
|
"description": "Maximum back window to use for alerting",
|
||||||
|
"title": "Maximum back window",
|
||||||
|
"type": "integer",
|
||||||
|
"minimum": 1,
|
||||||
|
"maximum": 255
|
||||||
|
},
|
||||||
|
"foreWindow": {
|
||||||
|
"description": "Fore window to use for alerting",
|
||||||
|
"title": "Fore window",
|
||||||
|
"type": "integer",
|
||||||
|
"minimum": 1,
|
||||||
|
"maximum": 255
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": [
|
||||||
|
"name",
|
||||||
|
"subtests"
|
||||||
|
],
|
||||||
|
"type": "object"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"description": "Structure for submitting performance data as part of a job",
|
||||||
|
"id": "https://treeherder.mozilla.org/schemas/v1/performance-artifact.json#",
|
||||||
|
"properties": {
|
||||||
|
"application":{
|
||||||
|
"$ref": "#/definitions/application_schema"
|
||||||
|
},
|
||||||
|
"framework": {
|
||||||
|
"$ref": "#/definitions/framework_schema"
|
||||||
|
},
|
||||||
|
"suites": {
|
||||||
|
"description": "List of suite-level data submitted as part of this structure",
|
||||||
|
"items": {
|
||||||
|
"$ref": "#/definitions/suite_schema"
|
||||||
|
},
|
||||||
|
"title": "Performance suites",
|
||||||
|
"type": "array"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": [
|
||||||
|
"framework",
|
||||||
|
"suites"
|
||||||
|
],
|
||||||
|
"title": "Perfherder Schema",
|
||||||
|
"type": "object"
|
||||||
|
}
|
|
@ -0,0 +1,13 @@
|
||||||
|
# Direct dependencies
|
||||||
|
attrs==19.1.0 --hash=sha256:69c0dbf2ed392de1cb5ec704444b08a5ef81680a61cb899dc08127123af36a79
|
||||||
|
structlog==19.1.0 --hash=sha256:db441b81c65b0f104a7ce5d86c5432be099956b98b8a2c8be0b3fb3a7a0b1536
|
||||||
|
voluptuous==0.11.5 --hash=sha256:303542b3fc07fb52ec3d7a1c614b329cdbee13a9d681935353d8ea56a7bfa9f1
|
||||||
|
jsonschema==3.2.0 --hash=sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163
|
||||||
|
|
||||||
|
# Transitive dependencies
|
||||||
|
importlib_metadata==1.1.0 --hash=sha256:e6ac600a142cf2db707b1998382cc7fc3b02befb7273876e01b8ad10b9652742
|
||||||
|
more_itertools==8.0.0 --hash=sha256:a0ea684c39bc4315ba7aae406596ef191fd84f873d2d2751f84d64e81a7a2d45
|
||||||
|
pyrsistent==0.15.6 --hash=sha256:f3b280d030afb652f79d67c5586157c5c1355c9a58dfc7940566e28d28f3df1b
|
||||||
|
setuptools==46.0.0 --hash=sha256:693e0504490ed8420522bf6bc3aa4b0da6a9f1c80c68acfb4e959275fd04cd82
|
||||||
|
six==1.12.0 --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c
|
||||||
|
zipp==0.6.0 --hash=sha256:f06903e9f1f43b12d371004b4ac7b06ab39a44adc747266928ae6debfa7b3335
|
|
@ -0,0 +1,342 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||||
|
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||||
|
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
"""Instrument visualmetrics.py to run in parallel."""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import tarfile
|
||||||
|
from concurrent.futures import ProcessPoolExecutor
|
||||||
|
from functools import partial
|
||||||
|
from multiprocessing import cpu_count
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import attr
|
||||||
|
import structlog
|
||||||
|
from jsonschema import validate
|
||||||
|
from voluptuous import ALLOW_EXTRA, Required, Schema
|
||||||
|
|
||||||
|
|
||||||
|
#: The directory where artifacts from this job will be placed.
#: Matches the artifact paths declared in the visual-metrics task
#: definition (public/perfherder-data.json and public/summary.json).
OUTPUT_DIR = Path("/", "builds", "worker", "artifacts")
|
||||||
|
|
||||||
|
#: A job to process through visualmetrics.py
#: (``attr.s`` generates __init__/__repr__/__eq__ for these three fields).
@attr.s
class Job:
    #: The name of the test.
    test_name = attr.ib(type=str)

    #: json_path: The path to the ``browsertime.json`` file on disk.
    json_path = attr.ib(type=Path)

    #: video_path: The path of the video file on disk.
    video_path = attr.ib(type=Path)
|
||||||
|
|
||||||
|
|
||||||
|
#: The schema for validating jobs.
#: jobs.json is produced by the browsertime task; each entry names a test
#: and the browsertime.json file holding its results.
JOB_SCHEMA = Schema(
    {
        Required("jobs"): [
            {Required("test_name"): str, Required("browsertime_json_path"): str}
        ],
        Required("application"): {Required("name"): str, "version": str},
    }
)

#: A partial schema for browsertime.json files.
#: Only the per-site video listing is checked; all other keys are allowed
#: through (extra=ALLOW_EXTRA) since the browsertime output is large.
BROWSERTIME_SCHEMA = Schema(
    [{Required("files"): {Required("video"): [str]}}], extra=ALLOW_EXTRA
)

#: The Perfherder output schema, loaded at import time from the file the
#: visual-metrics Docker image COPYs next to this script.
with Path("/", "builds", "worker", "performance-artifact-schema.json").open() as f:
    PERFHERDER_SCHEMA = json.loads(f.read())
|
||||||
|
|
||||||
|
|
||||||
|
def run_command(log, cmd):
    """Execute *cmd* and capture its standard output.

    Args:
        log: The structlog logger instance.
        cmd: the command to run as a list of strings.

    Returns:
        A tuple of the process' exit status and standard output.
    """
    log.info("Running command", cmd=cmd)
    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as exc:
        # Non-zero exit: report it but let the caller decide how to react.
        log.info("Command failed", cmd=cmd, status=exc.returncode, output=exc.output)
        return exc.returncode, exc.output
    log.info("Command succeeded", result=output)
    return 0, output
|
||||||
|
|
||||||
|
|
||||||
|
def append_result(log, suites, test_name, name, result):
    """Appends a ``name`` metrics result in the ``test_name`` suite.

    Args:
        log: The structlog logger instance.
        suites: A mapping containing the suites.
        test_name: The name of the test.
        name: The name of the metrics.
        result: The value to append.
    """
    # Progress metrics are time series, not scalar results; skip them.
    if name.endswith("Progress"):
        return
    try:
        result = int(result)
    except (ValueError, TypeError):
        # TypeError covers non-numeric, non-string values (e.g. None),
        # which int() raises instead of ValueError.
        log.error("Could not convert value", name=name)
        log.error("%s" % result)
        result = 0
    if test_name not in suites:
        suites[test_name] = {"name": test_name, "subtests": {}}

    subtests = suites[test_name]["subtests"]
    if name not in subtests:
        # First replicate for this metric: seed the subtest entry.
        subtests[name] = {
            "name": name,
            "replicates": [result],
            "lowerIsBetter": True,
            "unit": "ms",
        }
    else:
        subtests[name]["replicates"].append(result)
|
||||||
|
|
||||||
|
|
||||||
|
def compute_median(subtest):
    """Adds in the subtest the ``value`` field, which is the average of all
    replicates.

    The first replicate is discarded as a warm-up run when more than one
    replicate was collected; with a single replicate it is used as-is
    (previously this divided by zero).

    Args:
        subtest: The subtest containing all replicates.

    Returns:
        The subtest.
    """
    if "replicates" not in subtest:
        return subtest
    replicates = subtest["replicates"]
    # Drop the first (cold) run only when other replicates remain.
    series = replicates[1:] or replicates
    if not series:
        # No replicates at all: nothing to summarize.
        return subtest
    subtest["value"] = float(sum(series)) / float(len(series))
    return subtest
|
||||||
|
|
||||||
|
|
||||||
|
def get_suite(suite):
    """Return *suite* with a summary value computed for each subtest.

    Args:
        suite: The suite to convert.

    Returns:
        The suite, with its ``subtests`` mapping flattened into a list.
    """
    summarized = []
    for subtest in suite["subtests"].values():
        summarized.append(compute_median(subtest))
    suite["subtests"] = summarized
    return suite
|
||||||
|
|
||||||
|
|
||||||
|
def read_json(json_path, schema):
    """Parse a JSON file and validate it against the provided schema.

    Args:
        json_path: Path of json file to parse.
        schema: A callable to validate the JSON's schema.

    Returns:
        The contents of the file at ``json_path`` interpreted as JSON.
    """
    # NOTE: relies on the module-level ``log`` bound in the __main__ block.
    try:
        with open(str(json_path), "r") as fh:
            data = json.load(fh)
    except Exception:
        log.error("Could not read JSON file", path=json_path, exc_info=True)
        raise
    log.info("Loaded JSON from file", path=json_path, read_json=data)

    try:
        schema(data)
    except Exception:
        log.error("JSON failed to validate", exc_info=True)
        raise
    return data
|
||||||
|
|
||||||
|
|
||||||
|
def main(log, args):
    """Run visualmetrics.py in parallel.

    Extracts the browsertime results archive fetched into MOZ_FETCHES_DIR,
    builds one Job per recorded video, runs visualmetrics.py over all jobs
    with a process pool, and writes Perfherder-formatted results plus a
    run summary to OUTPUT_DIR.

    Args:
        log: The structlog logger instance.
        args: The parsed arguments from the argument parser.

    Returns:
        The return code that the program will exit with.
    """
    fetch_dir = os.getenv("MOZ_FETCHES_DIR")
    if not fetch_dir:
        log.error("Expected MOZ_FETCHES_DIR environment variable.")
        return 1

    fetch_dir = Path(fetch_dir)

    visualmetrics_path = fetch_dir / "visualmetrics.py"
    if not visualmetrics_path.exists():
        log.error(
            "Could not locate visualmetrics.py", expected_path=str(visualmetrics_path)
        )
        return 1

    browsertime_results_path = fetch_dir / "browsertime-results.tgz"

    try:
        with tarfile.open(str(browsertime_results_path)) as tar:
            tar.extractall(path=str(fetch_dir))
    except Exception:
        log.error(
            "Could not extract browsertime results archive",
            path=browsertime_results_path,
            exc_info=True,
        )
        return 1
    log.info("Extracted browsertime results", path=browsertime_results_path)

    try:
        jobs_json_path = fetch_dir / "browsertime-results" / "jobs.json"
        jobs_json = read_json(jobs_json_path, JOB_SCHEMA)
    except Exception:
        # read_json already logged the failure.
        return 1

    jobs = []

    for job in jobs_json["jobs"]:
        browsertime_json_path = fetch_dir / job["browsertime_json_path"]

        try:
            browsertime_json = read_json(browsertime_json_path, BROWSERTIME_SCHEMA)
        except Exception:
            return 1

        # One Job per recorded video; a browsertime.json holds one entry
        # per site, each with potentially several videos.
        for site in browsertime_json:
            for video in site["files"]["video"]:
                jobs.append(
                    Job(
                        test_name=job["test_name"],
                        json_path=browsertime_json_path,
                        video_path=browsertime_json_path.parent / video,
                    )
                )

    failed_runs = 0
    suites = {}

    with ProcessPoolExecutor(max_workers=cpu_count()) as executor:
        for job, result in zip(
            jobs,
            executor.map(
                partial(
                    run_visual_metrics,
                    visualmetrics_path=visualmetrics_path,
                    options=args.visual_metrics_options,
                ),
                jobs,
            ),
        ):
            returncode, res = result
            if returncode != 0:
                # Job defines ``video_path`` (not ``video_location``); the
                # previous attribute name raised AttributeError here.
                log.error(
                    "Failed to run visualmetrics.py",
                    video_path=job.video_path,
                    error=res,
                )
                failed_runs += 1
            else:
                # Python 3.5 requires a str object (not 3.6+)
                res = json.loads(res.decode("utf8"))
                for name, value in res.items():
                    append_result(log, suites, job.test_name, name, value)

    suites = [get_suite(suite) for suite in suites.values()]

    perf_data = {
        "framework": {"name": "browsertime"},
        "application": jobs_json["application"],
        "type": "vismet",
        "suites": suites,
    }

    # Validates the perf data complies with perfherder schema.
    # The perfherder schema uses jsonschema so we can't use voluptuous here.
    validate(perf_data, PERFHERDER_SCHEMA)

    raw_perf_data = json.dumps(perf_data)
    with Path(OUTPUT_DIR, "perfherder-data.json").open("w") as f:
        f.write(raw_perf_data)
    # Prints the data in logs for Perfherder to pick it up.
    log.info("PERFHERDER_DATA: %s" % raw_perf_data)

    # Lists the number of processed jobs, failures, and successes.
    with Path(OUTPUT_DIR, "summary.json").open("w") as f:
        json.dump(
            {
                "total_jobs": len(jobs),
                "successful_runs": len(jobs) - failed_runs,
                "failed_runs": failed_runs,
            },
            f,
        )

    # If there's one failure along the way, we want to return > 0
    # to trigger a red job in TC.
    return failed_runs
|
||||||
|
|
||||||
|
|
||||||
|
def run_visual_metrics(job, visualmetrics_path, options):
    """Invoke visualmetrics.py against the video captured for *job*.

    Returns:
        A (returncode, output) pair from running visualmetrics.py.
    """
    command = [
        "/usr/bin/python",
        str(visualmetrics_path),
        "--video",
        str(job.video_path),
    ] + list(options)
    return run_command(log, command)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Emit ISO-timestamped, plain-text (no ANSI color) structured logs so the
    # output is readable in Taskcluster live logs.
    structlog.configure(
        processors=[
            structlog.processors.TimeStamper(fmt="iso"),
            structlog.processors.format_exc_info,
            structlog.dev.ConsoleRenderer(colors=False),
        ],
        cache_logger_on_first_use=True,
    )

    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    arg_parser.add_argument(
        "visual_metrics_options",
        type=str,
        metavar="VISUAL-METRICS-OPTIONS",
        help="Options to pass to visualmetrics.py",
        nargs="*",
    )
    cli_args = arg_parser.parse_args()

    # Module-level logger shared with the helper functions in this script.
    log = structlog.get_logger()

    try:
        sys.exit(main(log, cli_args))
    except Exception as e:
        # Last-resort boundary: log the traceback and signal failure to TC.
        log.error("Unhandled exception: %s" % e, exc_info=True)
        sys.exit(1)
|
|
@ -49,3 +49,25 @@ def build_type_grouping(config, tasks):
|
||||||
groups.setdefault(build_type, []).append(task)
|
groups.setdefault(build_type, []).append(task)
|
||||||
|
|
||||||
return groups
|
return groups
|
||||||
|
|
||||||
|
|
||||||
|
@group_by('attributes')
def attributes_grouping(config, tasks):
    """Build one group per dependency task, selected by attribute.

    Only tasks whose kind appears in the kind's ``kind-dependencies`` and
    that carry at least one of the attributes listed in
    ``only-for-attributes`` are kept; each selected task becomes its own
    group, keyed by its label.

    Returns:
        dict mapping task label -> list of tasks (one task per group).
    """
    groups = {}
    kind_dependencies = config.get('kind-dependencies')
    only_attributes = config.get('only-for-attributes')

    for task in tasks:
        if task.kind not in kind_dependencies:
            continue

        if only_attributes:
            # Keep the task only if it has at least one required attribute.
            if not any(attr in task.attributes for attr in only_attributes):
                continue
        else:
            # Without an attribute filter there is nothing to select on,
            # so no task is grouped.
            continue

        groups.setdefault(task.label, []).append(task)

    return groups
|
||||||
|
|
|
@ -52,3 +52,10 @@ def target_tasks_raptor(full_task_graph, parameters, graph_config):
|
||||||
return task.kind == 'raptor'
|
return task.kind == 'raptor'
|
||||||
|
|
||||||
return [l for l, t in full_task_graph.tasks.iteritems() if filter(t, parameters)]
|
return [l for l, t in full_task_graph.tasks.iteritems() if filter(t, parameters)]
|
||||||
|
|
||||||
|
@_target_task('browsertime')
def target_tasks_browsertime(full_task_graph, parameters, graph_config):
    """Select the set of tasks required to run all browsertime tests.

    Renamed from ``target_tasks_raptor``: the original reused that name and
    silently shadowed the raptor selector defined above. Registration goes
    through the ``_target_task`` decorator, so selection behavior is
    unchanged by the rename.
    """
    def filter(task, parameters):
        return task.kind == 'browsertime'

    # NOTE: iteritems() matches the sibling selectors in this file
    # (Python 2 taskgraph); switch to items() when the file moves to py3.
    return [l for l, t in full_task_graph.tasks.iteritems() if filter(t, parameters)]
|
||||||
|
|
|
@ -0,0 +1,119 @@
|
||||||
|
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||||
|
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||||
|
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||||
|
"""
|
||||||
|
Apply some defaults and minor modifications to the jobs defined in the build
|
||||||
|
kind.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import, print_function, unicode_literals
|
||||||
|
|
||||||
|
import copy
|
||||||
|
import json
|
||||||
|
|
||||||
|
from taskgraph.transforms.base import TransformSequence
|
||||||
|
from taskgraph.util.treeherder import inherit_treeherder_from_dep
|
||||||
|
from taskgraph.util.schema import resolve_keyed_by
|
||||||
|
|
||||||
|
transforms = TransformSequence()
|
||||||
|
|
||||||
|
|
||||||
|
@transforms.add
def add_variants(config, tasks):
    """Fan each test definition out across matching build tasks and ABIs.

    For every kind dependency whose build-type is allowed, and every allowed
    ABI it produced an APK for, yield one deep copy of each test with the
    dependency's attributes merged in and the ABI/APK recorded.
    """
    allowed_build_types = config.config["only-for-build-types"]
    allowed_abis = config.config["only-for-abis"]

    # Materialize the incoming generator: each dep/ABI pair re-iterates it.
    test_templates = list(tasks)

    for dep_task in config.kind_dependencies_tasks:
        if dep_task.attributes.get("build-type", '') not in allowed_build_types:
            continue

        for abi, apk_path in dep_task.attributes["apks"].items():
            if abi not in allowed_abis:
                continue
            for template in test_templates:
                new_test = copy.deepcopy(template)
                # The test's own attributes win over the dependency's.
                attributes = copy.deepcopy(dep_task.attributes)
                attributes.update(new_test.get("attributes", {}))
                attributes["abi"] = abi
                attributes["apk"] = apk_path
                new_test["attributes"] = attributes
                new_test["primary-dependency"] = dep_task
                yield new_test
|
||||||
|
|
||||||
|
|
||||||
|
@transforms.add
def build_browsertime_task(config, tasks):
    """Turn each fanned-out test into a concrete browsertime task.

    Wires the signing task in as a dependency, resolves keyed-by fields for
    the ABI, fills in the mozharness environment, mounts the test runner
    script, and optionally enables visual-metrics video capture.
    """
    for task in tasks:
        # The signing task that produced the APK under test.
        signing = task.pop("primary-dependency")
        task.setdefault("dependencies", {})["signing"] = signing.label
        build_type = task["attributes"]["build-type"]
        abi = task["attributes"]["abi"]
        apk = task["attributes"]["apk"]

        test_name = task.pop("test-name")

        task["name"] = "{}-{}-{}".format(task["name"], build_type, abi)
        task["description"] = "{}-{}".format(build_type, abi)

        # Resolve by-abi values before anything below reads these fields.
        for key in ("args", "treeherder.platform", "worker-type"):
            resolve_keyed_by(task, key, item_name=task["name"], **{"abi": abi})

        task["treeherder"] = inherit_treeherder_from_dep(task, signing)

        # Artifact references are expanded by taskgraph at task-creation
        # time; "<signing/...>" points at the dependency declared above.
        extra_config = {
            "installer_url": "<signing/{}>".format(apk),
            "test_packages_url": "<geckoview-nightly/public/build/en-US/target.test_packages.json>",
        }
        env = task["worker"]["env"]
        env["EXTRA_MOZHARNESS_CONFIG"] = {
            "artifact-reference": json.dumps(extra_config, sort_keys=True)
        }
        env["GECKO_HEAD_REV"] = "default"
        env["MOZILLA_BUILD_URL"] = {"artifact-reference": "<signing/{}>".format(apk)}
        env["MOZHARNESS_URL"] = {
            "artifact-reference": "<geckoview-nightly/public/build/en-US/mozharness.zip>"
        }
        env["TASKCLUSTER_WORKER_TYPE"] = task["worker-type"]

        # The mozilla-central test driver script is mounted into the worker.
        worker = task["worker"]
        worker.setdefault("mounts", []).append(
            {
                "content": {
                    "url": "https://hg.mozilla.org/mozilla-central/raw-file/default/taskcluster/scripts/tester/test-linux.sh"
                },
                "file": "./test-linux.sh",
            }
        )
        task["run"]["command"].append("--test={}".format(test_name))
        task["run"]["command"].extend(task.pop("args", []))

        # Setup visual metrics: record video and tag the task so the
        # visual-metrics kind picks it up downstream.
        run_visual_metrics = task.pop("run-visual-metrics", False)
        if run_visual_metrics:
            task["run"]["command"].append("--browsertime-video")
            task["attributes"]["run-visual-metrics"] = True

        yield task
|
||||||
|
|
||||||
|
|
||||||
|
@transforms.add
def fill_email_data(config, tasks):
    """Resolve per-level notify settings and interpolate email templates."""
    repo_name = config.graph_config['taskgraph']['repositories']['mobile']['name']
    substitutions = {
        "product_name": repo_name.lower(),
        "head_rev": config.params["head_rev"],
    }

    for task in tasks:
        substitutions["task_name"] = task["name"]

        resolve_keyed_by(task, 'notify', item_name=task["name"], level=config.params["level"])
        email = task["notify"].get("email")
        if email:
            # Fill the placeholders in the link and subject templates.
            email["link"]["href"] = email["link"]["href"].format(**substitutions)
            email["subject"] = email["subject"].format(**substitutions)

        yield task
|
|
@ -0,0 +1,93 @@
|
||||||
|
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||||
|
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||||
|
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||||
|
"""
|
||||||
|
Generate labels for tasks without names, consistently.
|
||||||
|
Uses attributes from `primary-dependency`.
|
||||||
|
"""
|
||||||
|
from __future__ import absolute_import, print_function, unicode_literals
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
from taskgraph.transforms.base import TransformSequence
|
||||||
|
|
||||||
|
transforms = TransformSequence()
|
||||||
|
|
||||||
|
SYMBOL = "{groupSymbol}({symbol}-vismet)"
|
||||||
|
# the test- prefix makes the task SETA-optimized.
|
||||||
|
LABEL = "test-vismet-{platform}-{label}"
|
||||||
|
|
||||||
|
|
||||||
|
@transforms.add
def make_label(config, jobs):
    """Derive a readable label for each job from its primary dependency.

    The template is chosen from the dependent job's locale / l10n-chunk
    attributes, or from the current kind for EME-free / partner repacks.
    """
    for job in jobs:
        dep_job = job['primary-dependency']
        attr = dep_job.attributes.get

        locale = attr('locale', job.get('locale'))
        is_partner_kind = (
            config.kind.startswith("release-eme-free")
            or config.kind.startswith("release-partner-repack")
        )

        if locale:
            template = "{kind}-{locale}-{build_platform}/{build_type}"
        elif attr('l10n_chunk'):
            template = "{kind}-{build_platform}-{l10n_chunk}/{build_type}"
        elif is_partner_kind:
            extra = job.get("extra", {})
            suffix = extra.get("repack_suffix", None) or extra.get("repack_id", None)
            template = "{kind}-{build_platform}"
            if suffix:
                template += "-{}".format(suffix.replace('/', '-'))
        else:
            template = "{kind}-{build_platform}/{build_type}"

        job['label'] = template.format(
            kind=config.kind,
            build_platform=attr('build_platform'),
            build_type=attr('build_type'),
            locale=attr('locale', job.get('locale', '')),  # locale can be absent
            l10n_chunk=attr('l10n_chunk', ''),  # can be empty
        )

        yield job
|
||||||
|
|
||||||
|
|
||||||
|
@transforms.add
def run_visual_metrics(config, jobs):
    """Configure each vismet job from the browsertime task it processes.

    Jobs without a primary dependency are dropped (nothing is yielded for
    them); for the rest, the browsertime results artifact is fetched and the
    treeherder/label/run-on settings are copied from the dependent task.
    """
    for job in jobs:
        dep_job = job.pop('primary-dependency', None)
        if dep_job is not None:
            platform = dep_job.task['extra']['treeherder-platform']
            # Depend on the browsertime task that produced the video.
            job['dependencies'] = {dep_job.label: dep_job.label}

            # Add the artifact to be processed as a fetches artifact
            job['fetches'][dep_job.label] = [{
                'artifact': 'browsertime-results.tgz',
                'extract': True
            }]

            # Set the artifact prefix for the browsertime results
            job.setdefault('attributes', {})
            job['attributes']['artifact_prefix'] = 'public/test_info'

            # vismet runs on Linux but we want to have it displayed
            # alongside the job it was triggered by to make it easier for
            # people to find it back.
            job['label'] = LABEL.format(platform=platform, label=dep_job.label)
            treeherder_info = dict(dep_job.task['extra']['treeherder'])
            job['treeherder']['platform'] = platform
            job['treeherder']['symbol'] = SYMBOL.format(
                groupSymbol=treeherder_info['groupSymbol'],
                symbol=treeherder_info['symbol']
            )

            # run-on-projects needs to be set based on the dependent task
            attributes = dict(dep_job.attributes)
            job['run-on-projects'] = attributes['run_on_projects']

            # The run-on-tasks-for also needs to be setup here
            job['run-on-tasks-for'] = attributes.get('run_on_tasks_for', [])

            # We can't use the multi_dep transforms which remove this
            # field, so we remove the dependent-tasks entry here
            del job['dependent-tasks']

            yield job
|
Loading…
Reference in New Issue